Diffstat (limited to 'arch/powerpc/platforms')
325 files changed, 8796 insertions, 14757 deletions
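Note: much of the board-file churn below follows a single pattern: each platform's open-coded probe() check via of_machine_is_compatible() is replaced by the generic .compatible (or .compatibles) field of define_machine(), and the .calibrate_decr = generic_calibrate_decr initialiser is dropped. The sketch below is assembled from the canyonlands hunk later in this diff; it is illustrative only, with includes abbreviated to the ones relevant here rather than a complete file.

	#include <asm/machdep.h>
	#include <asm/pci-bridge.h>
	#include <asm/ppc4xx.h>
	#include <asm/udbg.h>
	#include <asm/uic.h>

	/* probe() no longer matches the device tree; generic code checks .compatible */
	static int __init ppc460ex_probe(void)
	{
		pci_set_flags(PCI_REASSIGN_ALL_RSRC);

		return 1;
	}

	define_machine(canyonlands) {
		.name       = "Canyonlands",
		.compatible = "amcc,canyonlands", /* replaces of_machine_is_compatible() in probe() */
		.probe      = ppc460ex_probe,
		.progress   = udbg_progress,
		.init_IRQ   = uic_init_tree,
		.get_irq    = uic_get_irq,
		.restart    = ppc4xx_reset_system,
	};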
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig deleted file mode 100644 index 614ea6dc994c..000000000000 --- a/arch/powerpc/platforms/40x/Kconfig +++ /dev/null @@ -1,77 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -config ACADIA - bool "Acadia" - depends on 40x - select PPC40x_SIMPLE - select 405EZ - help - This option enables support for the AMCC 405EZ Acadia evaluation board. - -config HOTFOOT - bool "Hotfoot" - depends on 40x - select PPC40x_SIMPLE - select FORCE_PCI - help - This option enables support for the ESTEEM 195E Hotfoot board. - -config KILAUEA - bool "Kilauea" - depends on 40x - select 405EX - select PPC40x_SIMPLE - select PPC4xx_PCI_EXPRESS - select FORCE_PCI - select PCI_MSI - help - This option enables support for the AMCC PPC405EX evaluation board. - -config MAKALU - bool "Makalu" - depends on 40x - select 405EX - select FORCE_PCI - select PPC4xx_PCI_EXPRESS - select PPC40x_SIMPLE - help - This option enables support for the AMCC PPC405EX board. - -config OBS600 - bool "OpenBlockS 600" - depends on 40x - select 405EX - select PPC40x_SIMPLE - help - This option enables support for PlatHome OpenBlockS 600 server - -config PPC40x_SIMPLE - bool "Simple PowerPC 40x board support" - depends on 40x - help - This option enables the simple PowerPC 40x platform support. - -config 405EX - bool - select IBM_EMAC_EMAC4 if IBM_EMAC - select IBM_EMAC_RGMII if IBM_EMAC - -config 405EZ - bool - select IBM_EMAC_NO_FLOW_CTRL if IBM_EMAC - select IBM_EMAC_MAL_CLR_ICINTSTAT if IBM_EMAC - select IBM_EMAC_MAL_COMMON_ERR if IBM_EMAC - -config PPC4xx_GPIO - bool "PPC4xx GPIO support" - depends on 40x - select GPIOLIB - help - Enable gpiolib support for ppc40x based boards - -config APM8018X - bool "APM8018X" - depends on 40x - select PPC40x_SIMPLE - help - This option enables support for the AppliedMicro APM8018X evaluation - board. diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile deleted file mode 100644 index 122de98527c4..000000000000 --- a/arch/powerpc/platforms/40x/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_PPC40x_SIMPLE) += ppc40x_simple.o diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c deleted file mode 100644 index dce696c32679..000000000000 --- a/arch/powerpc/platforms/40x/ppc40x_simple.c +++ /dev/null @@ -1,78 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Generic PowerPC 40x platform support - * - * Copyright 2008 IBM Corporation - * - * This implements simple platform support for PowerPC 44x chips. This is - * mostly used for eval boards or other simple and "generic" 44x boards. If - * your board has custom functions or hardware, then you will likely want to - * implement your own board.c file to accommodate it. 
- */ - -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <asm/ppc4xx.h> -#include <asm/time.h> -#include <asm/udbg.h> -#include <asm/uic.h> - -#include <linux/init.h> -#include <linux/of_platform.h> - -static const struct of_device_id ppc40x_of_bus[] __initconst = { - { .compatible = "ibm,plb3", }, - { .compatible = "ibm,plb4", }, - { .compatible = "ibm,opb", }, - { .compatible = "ibm,ebc", }, - { .compatible = "simple-bus", }, - {}, -}; - -static int __init ppc40x_device_probe(void) -{ - of_platform_bus_probe(NULL, ppc40x_of_bus, NULL); - - return 0; -} -machine_device_initcall(ppc40x_simple, ppc40x_device_probe); - -/* This is the list of boards that can be supported by this simple - * platform code. This does _not_ mean the boards are compatible, - * as they most certainly are not from a device tree perspective. - * However, their differences are handled by the device tree and the - * drivers and therefore they don't need custom board support files. - * - * Again, if your board needs to do things differently then create a - * board.c file for it rather than adding it to this list. - */ -static const char * const board[] __initconst = { - "amcc,acadia", - "amcc,haleakala", - "amcc,kilauea", - "amcc,makalu", - "apm,klondike", - "est,hotfoot", - "plathome,obs600", - NULL -}; - -static int __init ppc40x_probe(void) -{ - if (of_device_compatible_match(of_root, board)) { - pci_set_flags(PCI_REASSIGN_ALL_RSRC); - return 1; - } - - return 0; -} - -define_machine(ppc40x_simple) { - .name = "PowerPC 40x Platform", - .probe = ppc40x_probe, - .progress = udbg_progress, - .init_IRQ = uic_init_tree, - .get_irq = uic_get_irq, - .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, -}; diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 25b80cd558f8..35a1f4b9f827 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -173,6 +173,7 @@ config ISS4xx config CURRITUCK bool "IBM Currituck (476fpe) Support" depends on PPC_47x + select I2C select SWIOTLB select 476FPE select FORCE_PCI @@ -230,6 +231,7 @@ config PPC4xx_GPIO bool "PPC4xx GPIO support" depends on 44x select GPIOLIB + select OF_GPIO_MM_GPIOCHIP help Enable gpiolib support for ppc440 based boards diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile index 5ba031f57652..ca7b1bb442d9 100644 --- a/arch/powerpc/platforms/44x/Makefile +++ b/arch/powerpc/platforms/44x/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += misc_44x.o machine_check.o +obj-y += misc_44x.o machine_check.o uic.o soc.o ifneq ($(CONFIG_PPC4xx_CPM),y) obj-y += idle.o endif @@ -12,3 +12,7 @@ obj-$(CONFIG_CANYONLANDS)+= canyonlands.o obj-$(CONFIG_CURRITUCK) += ppc476.o obj-$(CONFIG_AKEBONO) += ppc476.o obj-$(CONFIG_FSP2) += fsp2.o +obj-$(CONFIG_PCI) += pci.o +obj-$(CONFIG_PPC4xx_HSTA_MSI) += hsta_msi.o +obj-$(CONFIG_PPC4xx_CPM) += cpm.o +obj-$(CONFIG_PPC4xx_GPIO) += gpio.o diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c index 5b23aef8bdef..8742a10d9e0c 100644 --- a/arch/powerpc/platforms/44x/canyonlands.c +++ b/arch/powerpc/platforms/44x/canyonlands.c @@ -39,11 +39,9 @@ machine_device_initcall(canyonlands, ppc460ex_device_probe); static int __init ppc460ex_probe(void) { - if (of_machine_is_compatible("amcc,canyonlands")) { - pci_set_flags(PCI_REASSIGN_ALL_RSRC); - return 1; - } - return 0; + pci_set_flags(PCI_REASSIGN_ALL_RSRC); + + return 1; } /* USB PHY fixup code on 
Canyonlands kit. */ @@ -110,10 +108,10 @@ err_bcsr: machine_device_initcall(canyonlands, ppc460ex_canyonlands_fixup); define_machine(canyonlands) { .name = "Canyonlands", + .compatible = "amcc,canyonlands", .probe = ppc460ex_probe, .progress = udbg_progress, .init_IRQ = uic_init_tree, .get_irq = uic_get_irq, .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/44x/cpm.c index 1d3bc35ee1a7..670f8ad4465b 100644 --- a/arch/powerpc/platforms/4xx/cpm.c +++ b/arch/powerpc/platforms/44x/cpm.c @@ -18,7 +18,7 @@ */ #include <linux/kernel.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/sysfs.h> #include <linux/cpu.h> #include <linux/suspend.h> @@ -63,7 +63,7 @@ static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask) * known as class 1, 2 and 3. For class 1 units, they are * unconditionally put to sleep when the corresponding CPM bit is * set. For class 2 and 3 units this is not case; if they can be - * put to to sleep, they will. Here we do not verify, we just + * put to sleep, they will. Here we do not verify, we just * set them and expect them to eventually go off when they can. */ value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]); diff --git a/arch/powerpc/platforms/44x/ebony.c b/arch/powerpc/platforms/44x/ebony.c index 0d8f202bc45f..4861310c8dc0 100644 --- a/arch/powerpc/platforms/44x/ebony.c +++ b/arch/powerpc/platforms/44x/ebony.c @@ -45,9 +45,6 @@ machine_device_initcall(ebony, ebony_device_probe); */ static int __init ebony_probe(void) { - if (!of_machine_is_compatible("ibm,ebony")) - return 0; - pci_set_flags(PCI_REASSIGN_ALL_RSRC); return 1; @@ -55,10 +52,10 @@ static int __init ebony_probe(void) define_machine(ebony) { .name = "Ebony", + .compatible = "ibm,ebony", .probe = ebony_probe, .progress = udbg_progress, .init_IRQ = uic_init_tree, .get_irq = uic_get_irq, .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/44x/fsp2.c b/arch/powerpc/platforms/44x/fsp2.c index e2e4f6d8150d..f6b8d02e08b0 100644 --- a/arch/powerpc/platforms/44x/fsp2.c +++ b/arch/powerpc/platforms/44x/fsp2.c @@ -205,7 +205,7 @@ static void __init node_irq_request(const char *compat, irq_handler_t errirq_han for_each_compatible_node(np, NULL, compat) { irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("device tree node %pOFn is missing a interrupt", np); of_node_put(np); @@ -313,5 +313,4 @@ define_machine(fsp2) { .init_IRQ = fsp2_irq_init, .get_irq = uic_get_irq, .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/4xx/gpio.c b/arch/powerpc/platforms/44x/gpio.c index 49ee8d365852..d540e261d85a 100644 --- a/arch/powerpc/platforms/4xx/gpio.c +++ b/arch/powerpc/platforms/44x/gpio.c @@ -14,7 +14,7 @@ #include <linux/spinlock.h> #include <linux/io.h> #include <linux/of.h> -#include <linux/of_gpio.h> +#include <linux/gpio/legacy-of-mm-gpiochip.h> #include <linux/gpio/driver.h> #include <linux/types.h> #include <linux/slab.h> @@ -75,8 +75,7 @@ __ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) clrbits32(®s->or, GPIO_MASK(gpio)); } -static void -ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +static int ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc); unsigned long flags; @@ -88,6 +87,8 @@ ppc4xx_gpio_set(struct gpio_chip *gc, 
unsigned int gpio, int val) spin_unlock_irqrestore(&chip->lock, flags); pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); + + return 0; } static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) @@ -179,7 +180,7 @@ static int __init ppc4xx_add_gpiochips(void) gc->direction_input = ppc4xx_gpio_dir_in; gc->direction_output = ppc4xx_gpio_dir_out; gc->get = ppc4xx_gpio_get; - gc->set = ppc4xx_gpio_set; + gc->set_rv = ppc4xx_gpio_set; ret = of_mm_gpiochip_add_data(np, mm_gc, ppc4xx_gc); if (ret) diff --git a/arch/powerpc/platforms/4xx/hsta_msi.c b/arch/powerpc/platforms/44x/hsta_msi.c index d4f7fff1fc87..c6bd846b0d65 100644 --- a/arch/powerpc/platforms/4xx/hsta_msi.c +++ b/arch/powerpc/platforms/44x/hsta_msi.c @@ -11,7 +11,7 @@ #include <linux/msi.h> #include <linux/of.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/pci.h> #include <linux/semaphore.h> #include <asm/msi_bitmap.h> @@ -115,6 +115,7 @@ static void hsta_teardown_msi_irqs(struct pci_dev *dev) msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1); pr_debug("%s: Teardown IRQ %u (index %u)\n", __func__, entry->irq, irq); + entry->irq = 0; } } diff --git a/arch/powerpc/platforms/44x/idle.c b/arch/powerpc/platforms/44x/idle.c index f533b495e7db..e2eeef8dff78 100644 --- a/arch/powerpc/platforms/44x/idle.c +++ b/arch/powerpc/platforms/44x/idle.c @@ -27,7 +27,7 @@ static void ppc44x_idle(void) isync(); } -int __init ppc44x_idle_init(void) +static int __init ppc44x_idle_init(void) { if (!mode_spin) { /* If we are not setting spin mode diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c index c5f82591408c..ef883d97fe15 100644 --- a/arch/powerpc/platforms/44x/iss4xx.c +++ b/arch/powerpc/platforms/44x/iss4xx.c @@ -52,7 +52,7 @@ static void __init iss4xx_init_irq(void) /* Find top level interrupt controller */ for_each_node_with_property(np, "interrupt-controller") { - if (of_get_property(np, "interrupts", NULL) == NULL) + if (!of_property_present(np, "interrupts")) break; } if (np == NULL) @@ -140,23 +140,11 @@ static void __init iss4xx_setup_arch(void) iss4xx_smp_init(); } -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init iss4xx_probe(void) -{ - if (!of_machine_is_compatible("ibm,iss-4xx")) - return 0; - - return 1; -} - define_machine(iss4xx) { .name = "ISS-4xx", - .probe = iss4xx_probe, + .compatible = "ibm,iss-4xx", .progress = udbg_progress, .init_IRQ = iss4xx_init_irq, .setup_arch = iss4xx_setup_arch, .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/44x/machine_check.c b/arch/powerpc/platforms/44x/machine_check.c index 5d19daacd78a..85ff33a8d9b6 100644 --- a/arch/powerpc/platforms/44x/machine_check.c +++ b/arch/powerpc/platforms/44x/machine_check.c @@ -9,6 +9,21 @@ #include <asm/reg.h> #include <asm/cacheflush.h> +int machine_check_4xx(struct pt_regs *regs) +{ + unsigned long reason = regs->esr; + + if (reason & ESR_IMCP) { + printk("Instruction"); + mtspr(SPRN_ESR, reason & ~ESR_IMCP); + } else + printk("Data"); + + printk(" machine check in kernel mode.\n"); + + return 0; +} + int machine_check_440A(struct pt_regs *regs) { unsigned long reason = regs->esr; diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/44x/pci.c index ca5dd7a5842a..364aeb86ab64 100644 --- a/arch/powerpc/platforms/4xx/pci.c +++ b/arch/powerpc/platforms/44x/pci.c @@ -57,7 +57,7 @@ static inline int ppc440spe_revA(void) 
static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev) { struct pci_controller *hose; - int i; + struct resource *r; if (dev->devfn != 0 || dev->bus->self != NULL) return; @@ -79,9 +79,9 @@ static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev) /* Hide the PCI host BARs from the kernel as their content doesn't * fit well in the resource management */ - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { - dev->resource[i].start = dev->resource[i].end = 0; - dev->resource[i].flags = 0; + pci_dev_for_each_resource(dev, r) { + r->start = r->end = 0; + r->flags = 0; } printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n", @@ -94,10 +94,8 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose, struct resource *res) { u64 size; - const u32 *ranges; - int rlen; - int pna = of_n_addr_cells(hose->dn); - int np = pna + 5; + struct of_range_parser parser; + struct of_range range; /* Default */ res->start = 0; @@ -105,18 +103,15 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose, res->end = size - 1; res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH; - /* Get dma-ranges property */ - ranges = of_get_property(hose->dn, "dma-ranges", &rlen); - if (ranges == NULL) + if (of_pci_dma_range_parser_init(&parser, hose->dn)) goto out; - /* Walk it */ - while ((rlen -= np * 4) >= 0) { - u32 pci_space = ranges[0]; - u64 pci_addr = of_read_number(ranges + 1, 2); - u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3); - size = of_read_number(ranges + pna + 3, 2); - ranges += np; + for_each_of_range(&parser, &range) { + u32 pci_space = range.flags; + u64 pci_addr = range.bus_addr; + u64 cpu_addr = range.cpu_addr; + size = range.size; + if (cpu_addr == OF_BAD_ADDR || size == 0) continue; @@ -348,7 +343,7 @@ static void __init ppc4xx_probe_pci_bridge(struct device_node *np) } /* Check if primary bridge */ - if (of_get_property(np, "primary", NULL)) + if (of_property_read_bool(np, "primary")) primary = 1; /* Get bus range if any */ @@ -530,7 +525,7 @@ static void __init ppc4xx_probe_pcix_bridge(struct device_node *np) struct pci_controller *hose = NULL; void __iomem *reg = NULL; const int *bus_range; - int big_pim = 0, msi = 0, primary = 0; + int big_pim, msi, primary; /* Fetch config space registers address */ if (of_address_to_resource(np, 0, &rsrc_cfg)) { @@ -546,16 +541,13 @@ static void __init ppc4xx_probe_pcix_bridge(struct device_node *np) } /* Check if it supports large PIMs (440GX) */ - if (of_get_property(np, "large-inbound-windows", NULL)) - big_pim = 1; + big_pim = of_property_read_bool(np, "large-inbound-windows"); /* Check if we should enable MSIs inbound hole */ - if (of_get_property(np, "enable-msi-hole", NULL)) - msi = 1; + msi = of_property_read_bool(np, "enable-msi-hole"); /* Check if primary bridge */ - if (of_get_property(np, "primary", NULL)) - primary = 1; + primary = of_property_read_bool(np, "primary"); /* Get bus range if any */ bus_range = of_get_property(np, "bus-range", NULL); @@ -1266,102 +1258,6 @@ static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = { #endif /* CONFIG_44x */ -#ifdef CONFIG_40x - -static int __init ppc405ex_pciex_core_init(struct device_node *np) -{ - /* Nothing to do, return 2 ports */ - return 2; -} - -static void __init ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port) -{ - /* Assert the PE0_PHY reset */ - mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000); - msleep(1); - - /* deassert the PE0_hotreset */ - if (port->endpoint) - mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000); - 
else - mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000); - - /* poll for phy !reset */ - /* XXX FIXME add timeout */ - while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000)) - ; - - /* deassert the PE0_gpl_utl_reset */ - mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000); -} - -static int __init ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port) -{ - u32 val; - - if (port->endpoint) - val = PTYPE_LEGACY_ENDPOINT; - else - val = PTYPE_ROOT_PORT; - - mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, - 1 << 24 | val << 20 | LNKW_X1 << 12); - - mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000); - mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000); - mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000); - mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003); - - /* - * Only reset the PHY when no link is currently established. - * This is for the Atheros PCIe board which has problems to establish - * the link (again) after this PHY reset. All other currently tested - * PCIe boards don't show this problem. - * This has to be re-tested and fixed in a later release! - */ - val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP); - if (!(val & 0x00001000)) - ppc405ex_pcie_phy_reset(port); - - dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000); /* guarded on */ - - port->has_ibpre = 1; - - return ppc4xx_pciex_port_reset_sdr(port); -} - -static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port) -{ - dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0); - - /* - * Set buffer allocations and then assert VRB and TXE. - */ - out_be32(port->utl_base + PEUTL_OUTTR, 0x02000000); - out_be32(port->utl_base + PEUTL_INTR, 0x02000000); - out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000); - out_be32(port->utl_base + PEUTL_PBBSZ, 0x21000000); - out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000); - out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000); - out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000); - out_be32(port->utl_base + PEUTL_PCTL, 0x80800066); - - out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000); - - return 0; -} - -static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata = -{ - .want_sdr = true, - .core_init = ppc405ex_pciex_core_init, - .port_init_hw = ppc405ex_pciex_init_port_hw, - .setup_utl = ppc405ex_pciex_init_utl, - .check_link = ppc4xx_pciex_check_link_sdr, -}; - -#endif /* CONFIG_40x */ - #ifdef CONFIG_476FPE static int __init ppc_476fpe_pciex_core_init(struct device_node *np) { @@ -1430,10 +1326,6 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np) if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx")) ppc4xx_pciex_hwops = &apm821xx_pcie_hwops; #endif /* CONFIG_44x */ -#ifdef CONFIG_40x - if (of_device_is_compatible(np, "ibm,plb-pciex-405ex")) - ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops; -#endif #ifdef CONFIG_476FPE if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe") || of_device_is_compatible(np, "ibm,plb-pciex-476gtr")) @@ -1915,14 +1807,13 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port) struct resource dma_window; struct pci_controller *hose = NULL; const int *bus_range; - int primary = 0, busses; + int primary, busses; void __iomem *mbase = NULL, *cfg_data = NULL; const u32 *pval; u32 val; /* Check if primary bridge */ - if (of_get_property(port->node, "primary", NULL)) - primary = 1; + primary = of_property_read_bool(port->node, "primary"); /* Get bus range if any */ bus_range = of_get_property(port->node, "bus-range", NULL); diff --git 
a/arch/powerpc/platforms/4xx/pci.h b/arch/powerpc/platforms/44x/pci.h index bb4821938ab1..bb4821938ab1 100644 --- a/arch/powerpc/platforms/4xx/pci.h +++ b/arch/powerpc/platforms/44x/pci.h diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c index 2a0dcdf04b21..971786ff1a7b 100644 --- a/arch/powerpc/platforms/44x/ppc44x_simple.c +++ b/arch/powerpc/platforms/44x/ppc44x_simple.c @@ -82,5 +82,4 @@ define_machine(ppc44x_simple) { .init_IRQ = uic_init_tree, .get_irq = uic_get_irq, .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c index 20cc8f80b086..e7b7bdaad341 100644 --- a/arch/powerpc/platforms/44x/ppc476.c +++ b/arch/powerpc/platforms/44x/ppc476.c @@ -95,7 +95,7 @@ static int avr_probe(struct i2c_client *client) } static const struct i2c_device_id avr_id[] = { - { "akebono-avr", 0 }, + { "akebono-avr" }, { } }; @@ -103,7 +103,7 @@ static struct i2c_driver avr_driver = { .driver = { .name = "akebono-avr", }, - .probe_new = avr_probe, + .probe = avr_probe, .id_table = avr_id, }; @@ -114,7 +114,8 @@ static int __init ppc47x_device_probe(void) return 0; } -machine_device_initcall(ppc47x, ppc47x_device_probe); +machine_device_initcall(ppc47x_akebono, ppc47x_device_probe); +machine_device_initcall(ppc47x_currituck, ppc47x_device_probe); static void __init ppc47x_init_irq(void) { @@ -122,7 +123,7 @@ static void __init ppc47x_init_irq(void) /* Find top level interrupt controller */ for_each_node_with_property(np, "interrupt-controller") { - if (of_get_property(np, "interrupts", NULL) == NULL) + if (!of_property_present(np, "interrupts")) break; } if (np == NULL) @@ -140,6 +141,8 @@ static void __init ppc47x_init_irq(void) ppc_md.get_irq = mpic_get_irq; } else panic("Unrecognized top level interrupt controller"); + + of_node_put(np); } #ifdef CONFIG_SMP @@ -247,7 +250,8 @@ fail: pr_info("%s: Unable to find board revision\n", __func__); return 0; } -machine_arch_initcall(ppc47x, ppc47x_get_board_rev); +machine_arch_initcall(ppc47x_akebono, ppc47x_get_board_rev); +machine_arch_initcall(ppc47x_currituck, ppc47x_get_board_rev); /* Use USB controller should have been hardware swizzled but it wasn't :( */ static void ppc47x_pci_irq_fixup(struct pci_dev *dev) @@ -266,28 +270,21 @@ static void ppc47x_pci_irq_fixup(struct pci_dev *dev) } } -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init ppc47x_probe(void) -{ - if (of_machine_is_compatible("ibm,akebono")) - return 1; - - if (of_machine_is_compatible("ibm,currituck")) { - ppc_md.pci_irq_fixup = ppc47x_pci_irq_fixup; - return 1; - } - - return 0; -} +define_machine(ppc47x_akebono) { + .name = "PowerPC 47x (akebono)", + .compatible = "ibm,akebono", + .progress = udbg_progress, + .init_IRQ = ppc47x_init_irq, + .setup_arch = ppc47x_setup_arch, + .restart = ppc4xx_reset_system, +}; -define_machine(ppc47x) { - .name = "PowerPC 47x", - .probe = ppc47x_probe, +define_machine(ppc47x_currituck) { + .name = "PowerPC 47x (currituck)", + .compatible = "ibm,currituck", .progress = udbg_progress, .init_IRQ = ppc47x_init_irq, + .pci_irq_fixup = ppc47x_pci_irq_fixup, .setup_arch = ppc47x_setup_arch, .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c index ed854b53877e..5cdaa4068e41 100644 --- a/arch/powerpc/platforms/44x/sam440ep.c +++ 
b/arch/powerpc/platforms/44x/sam440ep.c @@ -41,9 +41,6 @@ machine_device_initcall(sam440ep, sam440ep_device_probe); static int __init sam440ep_probe(void) { - if (!of_machine_is_compatible("acube,sam440ep")) - return 0; - pci_set_flags(PCI_REASSIGN_ALL_RSRC); return 1; @@ -51,12 +48,12 @@ static int __init sam440ep_probe(void) define_machine(sam440ep) { .name = "Sam440ep", + .compatible = "acube,sam440ep", .probe = sam440ep_probe, .progress = udbg_progress, .init_IRQ = uic_init_tree, .get_irq = uic_get_irq, .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, }; static struct i2c_board_info sam440ep_rtc_info = { diff --git a/arch/powerpc/platforms/4xx/soc.c b/arch/powerpc/platforms/44x/soc.c index ac1cd8b17879..5412e6b21e10 100644 --- a/arch/powerpc/platforms/4xx/soc.c +++ b/arch/powerpc/platforms/44x/soc.c @@ -15,12 +15,13 @@ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/of.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> #include <asm/dcr.h> #include <asm/dcr-regs.h> #include <asm/reg.h> +#include <asm/ppc4xx.h> static u32 dcrbase_l2c; @@ -111,7 +112,7 @@ static int __init ppc4xx_l2c_probe(void) } /* Install error handler */ - if (request_irq(irq, l2c_error_handler, 0, "L2C", 0) < 0) { + if (request_irq(irq, l2c_error_handler, 0, "L2C", NULL) < 0) { printk(KERN_ERR "Cannot install L2C error handler" ", cache is not enabled\n"); of_node_put(np); diff --git a/arch/powerpc/platforms/4xx/uic.c b/arch/powerpc/platforms/44x/uic.c index d667ad039bd3..85daf841fd3f 100644 --- a/arch/powerpc/platforms/4xx/uic.c +++ b/arch/powerpc/platforms/44x/uic.c @@ -24,6 +24,7 @@ #include <asm/irq.h> #include <asm/io.h> #include <asm/dcr.h> +#include <asm/uic.h> #define NR_UIC_INTS 32 @@ -36,7 +37,7 @@ #define UIC_VR 0x7 #define UIC_VCR 0x8 -struct uic *primary_uic; +static struct uic *primary_uic; struct uic { int index; @@ -253,8 +254,9 @@ static struct uic * __init uic_init_one(struct device_node *node) } uic->dcrbase = *dcrreg; - uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops, - uic); + uic->irqhost = irq_domain_create_linear(of_fwnode_handle(node), + NR_UIC_INTS, &uic_host_ops, + uic); if (! uic->irqhost) return NULL; /* FIXME: panic? 
*/ @@ -290,7 +292,7 @@ void __init uic_init_tree(void) if (!primary_uic) panic("Unable to initialize primary UIC %pOF\n", np); - irq_set_default_host(primary_uic->irqhost); + irq_set_default_domain(primary_uic->irqhost); of_node_put(np); /* The scan again for cascaded UICs */ @@ -326,5 +328,5 @@ unsigned int uic_get_irq(void) msr = mfdcr(primary_uic->dcrbase + UIC_MSR); src = 32 - ffs(msr); - return irq_linear_revmap(primary_uic->irqhost, src); + return irq_find_mapping(primary_uic->irqhost, src); } diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c index f03432ef010b..a5001d32f978 100644 --- a/arch/powerpc/platforms/44x/warp.c +++ b/arch/powerpc/platforms/44x/warp.c @@ -5,15 +5,18 @@ * Copyright (c) 2008-2009 PIKA Technologies * Sean MacLennan <smaclennan@pikatech.com> */ +#include <linux/err.h> #include <linux/init.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/kthread.h> +#include <linux/leds.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/of_gpio.h> +#include <linux/gpio/consumer.h> #include <linux/slab.h> #include <linux/export.h> @@ -39,22 +42,13 @@ static int __init warp_device_probe(void) } machine_device_initcall(warp, warp_device_probe); -static int __init warp_probe(void) -{ - if (!of_machine_is_compatible("pika,warp")) - return 0; - - return 1; -} - define_machine(warp) { .name = "Warp", - .probe = warp_probe, + .compatible = "pika,warp", .progress = udbg_progress, .init_IRQ = uic_init_tree, .get_irq = uic_get_irq, .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, }; @@ -90,60 +84,44 @@ static int __init warp_post_info(void) #ifdef CONFIG_SENSORS_AD7414 -static LIST_HEAD(dtm_shutdown_list); static void __iomem *dtm_fpga; -static unsigned green_led, red_led; - -struct dtm_shutdown { - struct list_head list; - void (*func)(void *arg); - void *arg; +#define WARP_GREEN_LED 0 +#define WARP_RED_LED 1 + +static struct gpio_led warp_gpio_led_pins[] = { + [WARP_GREEN_LED] = { + .name = "green", + .default_state = LEDS_DEFSTATE_KEEP, + .gpiod = NULL, /* to be filled by pika_setup_leds() */ + }, + [WARP_RED_LED] = { + .name = "red", + .default_state = LEDS_DEFSTATE_KEEP, + .gpiod = NULL, /* to be filled by pika_setup_leds() */ + }, }; +static struct gpio_led_platform_data warp_gpio_led_data = { + .leds = warp_gpio_led_pins, + .num_leds = ARRAY_SIZE(warp_gpio_led_pins), +}; -int pika_dtm_register_shutdown(void (*func)(void *arg), void *arg) -{ - struct dtm_shutdown *shutdown; - - shutdown = kmalloc(sizeof(struct dtm_shutdown), GFP_KERNEL); - if (shutdown == NULL) - return -ENOMEM; - - shutdown->func = func; - shutdown->arg = arg; - - list_add(&shutdown->list, &dtm_shutdown_list); - - return 0; -} - -int pika_dtm_unregister_shutdown(void (*func)(void *arg), void *arg) -{ - struct dtm_shutdown *shutdown; - - list_for_each_entry(shutdown, &dtm_shutdown_list, list) - if (shutdown->func == func && shutdown->arg == arg) { - list_del(&shutdown->list); - kfree(shutdown); - return 0; - } - - return -EINVAL; -} +static struct platform_device warp_gpio_leds = { + .name = "leds-gpio", + .id = -1, + .dev = { + .platform_data = &warp_gpio_led_data, + }, +}; static irqreturn_t temp_isr(int irq, void *context) { - struct dtm_shutdown *shutdown; int value = 1; local_irq_disable(); - gpio_set_value(green_led, 0); - - /* Run through the shutdown list. 
*/ - list_for_each_entry(shutdown, &dtm_shutdown_list, list) - shutdown->func(shutdown->arg); + gpiod_set_value(warp_gpio_led_pins[WARP_GREEN_LED].gpiod, 0); printk(KERN_EMERG "\n\nCritical Temperature Shutdown\n\n"); @@ -153,7 +131,7 @@ static irqreturn_t temp_isr(int irq, void *context) out_be32(dtm_fpga + 0x14, reset); } - gpio_set_value(red_led, value); + gpiod_set_value(warp_gpio_led_pins[WARP_RED_LED].gpiod, value); value ^= 1; mdelay(500); } @@ -162,25 +140,78 @@ static irqreturn_t temp_isr(int irq, void *context) return IRQ_HANDLED; } +/* + * Because green and red power LEDs are normally driven by leds-gpio driver, + * but in case of critical temperature shutdown we want to drive them + * ourselves, we acquire both and then create leds-gpio platform device + * ourselves, instead of doing it through device tree. This way we can still + * keep access to the gpios and use them when needed. + */ static int pika_setup_leds(void) { struct device_node *np, *child; + struct gpio_desc *gpio; + struct gpio_led *led; + int led_count = 0; + int error; + int i; - np = of_find_compatible_node(NULL, NULL, "gpio-leds"); + np = of_find_compatible_node(NULL, NULL, "warp-power-leds"); if (!np) { printk(KERN_ERR __FILE__ ": Unable to find leds\n"); return -ENOENT; } - for_each_child_of_node(np, child) - if (of_node_name_eq(child, "green")) - green_led = of_get_gpio(child, 0); - else if (of_node_name_eq(child, "red")) - red_led = of_get_gpio(child, 0); + for_each_child_of_node(np, child) { + for (i = 0; i < ARRAY_SIZE(warp_gpio_led_pins); i++) { + led = &warp_gpio_led_pins[i]; + + if (!of_node_name_eq(child, led->name)) + continue; + + if (led->gpiod) { + printk(KERN_ERR __FILE__ ": %s led has already been defined\n", + led->name); + continue; + } + + gpio = fwnode_gpiod_get_index(of_fwnode_handle(child), + NULL, 0, GPIOD_ASIS, + led->name); + error = PTR_ERR_OR_ZERO(gpio); + if (error) { + printk(KERN_ERR __FILE__ ": Failed to get %s led gpio: %d\n", + led->name, error); + of_node_put(child); + goto err_cleanup_pins; + } + + led->gpiod = gpio; + led_count++; + } + } of_node_put(np); + /* Skip device registration if no leds have been defined */ + if (led_count) { + error = platform_device_register(&warp_gpio_leds); + if (error) { + printk(KERN_ERR __FILE__ ": Unable to add leds-gpio: %d\n", + error); + goto err_cleanup_pins; + } + } + return 0; + +err_cleanup_pins: + for (i = 0; i < ARRAY_SIZE(warp_gpio_led_pins); i++) { + led = &warp_gpio_led_pins[i]; + gpiod_put(led->gpiod); + led->gpiod = NULL; + } + return error; } static void pika_setup_critical_temp(struct device_node *np, @@ -294,19 +325,6 @@ machine_late_initcall(warp, pika_dtm_start); #else /* !CONFIG_SENSORS_AD7414 */ -int pika_dtm_register_shutdown(void (*func)(void *arg), void *arg) -{ - return 0; -} - -int pika_dtm_unregister_shutdown(void (*func)(void *arg), void *arg) -{ - return 0; -} - machine_late_initcall(warp, warp_post_info); #endif - -EXPORT_SYMBOL(pika_dtm_register_shutdown); -EXPORT_SYMBOL(pika_dtm_unregister_shutdown); diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile deleted file mode 100644 index 2071a0abe09b..000000000000 --- a/arch/powerpc/platforms/4xx/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-y += uic.o machine_check.o -obj-$(CONFIG_4xx_SOC) += soc.o -obj-$(CONFIG_PCI) += pci.o -obj-$(CONFIG_PPC4xx_HSTA_MSI) += hsta_msi.o -obj-$(CONFIG_PPC4xx_CPM) += cpm.o -obj-$(CONFIG_PPC4xx_GPIO) += gpio.o diff --git 
a/arch/powerpc/platforms/4xx/machine_check.c b/arch/powerpc/platforms/4xx/machine_check.c deleted file mode 100644 index a905da1d6f41..000000000000 --- a/arch/powerpc/platforms/4xx/machine_check.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - */ - -#include <linux/kernel.h> -#include <linux/printk.h> -#include <linux/ptrace.h> - -#include <asm/reg.h> - -int machine_check_4xx(struct pt_regs *regs) -{ - unsigned long reason = regs->esr; - - if (reason & ESR_IMCP) { - printk("Instruction"); - mtspr(SPRN_ESR, reason & ~ESR_IMCP); - } else - printk("Data"); - printk(" machine check in kernel mode.\n"); - - return 0; -} diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c index 0652c7e69225..079cb3627eac 100644 --- a/arch/powerpc/platforms/512x/clock-commonclk.c +++ b/arch/powerpc/platforms/512x/clock-commonclk.c @@ -950,7 +950,7 @@ static void __init mpc5121_clk_register_of_provider(struct device_node *np) */ static void __init mpc5121_clk_provide_migration_support(void) { - + struct device_node *np; /* * pre-enable those clock items which are not yet appropriately * acquired by their peripheral driver @@ -970,7 +970,9 @@ static void __init mpc5121_clk_provide_migration_support(void) * unused and so it gets disabled */ clk_prepare_enable(clks[MPC512x_CLK_PSC3_MCLK]);/* serial console */ - if (of_find_compatible_node(NULL, "pci", "fsl,mpc5121-pci")) + np = of_find_compatible_node(NULL, "pci", "fsl,mpc5121-pci"); + of_node_put(np); + if (np) clk_prepare_enable(clks[MPC512x_CLK_PCI]); } @@ -984,7 +986,7 @@ static void __init mpc5121_clk_provide_migration_support(void) #define NODE_PREP do { \ of_address_to_resource(np, 0, &res); \ - snprintf(devname, sizeof(devname), "%08x.%s", res.start, np->name); \ + snprintf(devname, sizeof(devname), "%pa.%s", &res.start, np->name); \ } while (0) #define NODE_CHK(clkname, clkitem, regnode, regflag) do { \ @@ -1208,6 +1210,8 @@ int __init mpc5121_clk_init(void) /* register as an OF clock provider */ mpc5121_clk_register_of_provider(clk_np); + of_node_put(clk_np); + /* * unbreak not yet adjusted peripheral drivers during migration * towards fully operational common clock support, and allow diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c index fc3fb999cd74..a18f85b3ef36 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads.c @@ -10,7 +10,7 @@ #include <linux/kernel.h> #include <linux/io.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <asm/machdep.h> #include <asm/ipic.h> @@ -53,9 +53,6 @@ static void __init mpc5121_ads_init_IRQ(void) */ static int __init mpc5121_ads_probe(void) { - if (!of_machine_is_compatible("fsl,mpc5121ads")) - return 0; - mpc512x_init_early(); return 1; @@ -63,12 +60,12 @@ static int __init mpc5121_ads_probe(void) define_machine(mpc5121_ads) { .name = "MPC5121 ADS", + .compatible = "fsl,mpc5121ads", .probe = mpc5121_ads_probe, .setup_arch = mpc5121_ads_setup_arch, .discover_phbs = mpc5121_ads_setup_pci, .init = mpc512x_init, .init_IRQ = mpc5121_ads_init_IRQ, .get_irq = ipic_get_irq, - .calibrate_decr = generic_calibrate_decr, .restart = mpc512x_restart, }; diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index 6f08d07aee3b..2cf3c6237337 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c @@ -17,6 +17,8 @@ #include 
<linux/of_address.h> #include <linux/of_irq.h> +#include "mpc5121_ads.h" + static struct device_node *cpld_pic_node; static struct irq_domain *cpld_pic_host; @@ -186,7 +188,8 @@ mpc5121_ads_cpld_pic_init(void) cpld_pic_node = of_node_get(np); - cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL); + cpld_pic_host = irq_domain_create_linear(of_fwnode_handle(np), 16, + &cpld_pic_host_ops, NULL); if (!cpld_pic_host) { printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n"); goto end; diff --git a/arch/powerpc/platforms/512x/mpc512x.h b/arch/powerpc/platforms/512x/mpc512x.h index 2f3c60e373e1..d2cb06e3a436 100644 --- a/arch/powerpc/platforms/512x/mpc512x.h +++ b/arch/powerpc/platforms/512x/mpc512x.h @@ -13,7 +13,6 @@ extern void __init mpc512x_init(void); extern void __init mpc512x_setup_arch(void); extern int __init mpc5121_clk_init(void); const char *__init mpc512x_select_psc_compat(void); -const char *__init mpc512x_select_reset_compat(void); extern void __noreturn mpc512x_restart(char *cmd); #endif /* __MPC512X_H__ */ diff --git a/arch/powerpc/platforms/512x/mpc512x_generic.c b/arch/powerpc/platforms/512x/mpc512x_generic.c index 364564c995bd..d4fa6c302ccf 100644 --- a/arch/powerpc/platforms/512x/mpc512x_generic.c +++ b/arch/powerpc/platforms/512x/mpc512x_generic.c @@ -9,7 +9,7 @@ */ #include <linux/kernel.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <asm/machdep.h> #include <asm/ipic.h> @@ -32,9 +32,6 @@ static const char * const board[] __initconst = { */ static int __init mpc512x_generic_probe(void) { - if (!of_device_compatible_match(of_root, board)) - return 0; - mpc512x_init_early(); return 1; @@ -42,11 +39,11 @@ static int __init mpc512x_generic_probe(void) define_machine(mpc512x_generic) { .name = "MPC512x generic", + .compatibles = board, .probe = mpc512x_generic_probe, .init = mpc512x_init, .setup_arch = mpc512x_setup_arch, .init_IRQ = mpc512x_init_IRQ, .get_irq = ipic_get_irq, - .calibrate_decr = generic_calibrate_decr, .restart = mpc512x_restart, }; diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c index 04bf6ecf7d55..9668b052cd4b 100644 --- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c +++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c @@ -10,9 +10,9 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/platform_device.h> #include <asm/mpc5121.h> #include <asm/io.h> #include <linux/spinlock.h> @@ -373,50 +373,32 @@ static int get_cs_ranges(struct device *dev) { int ret = -ENODEV; struct device_node *lb_node; - const u32 *addr_cells_p; - const u32 *size_cells_p; - int proplen; - size_t i; + size_t i = 0; + struct of_range_parser parser; + struct of_range range; lb_node = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-localbus"); if (!lb_node) return ret; - /* - * The node defined as compatible with 'fsl,mpc5121-localbus' - * should have two address cells and one size cell. - * Every item of its ranges property should consist of: - * - the first address cell which is the chipselect number; - * - the second address cell which is the offset in the chipselect, - * must be zero. - * - CPU address of the beginning of an access window; - * - the only size cell which is the size of an access window. 
- */ - addr_cells_p = of_get_property(lb_node, "#address-cells", NULL); - size_cells_p = of_get_property(lb_node, "#size-cells", NULL); - if (addr_cells_p == NULL || *addr_cells_p != 2 || - size_cells_p == NULL || *size_cells_p != 1) { - goto end; - } - - proplen = of_property_count_u32_elems(lb_node, "ranges"); - if (proplen <= 0 || proplen % 4 != 0) - goto end; + of_range_parser_init(&parser, lb_node); + lpbfifo.cs_n = of_range_count(&parser); - lpbfifo.cs_n = proplen / 4; lpbfifo.cs_ranges = devm_kcalloc(dev, lpbfifo.cs_n, sizeof(struct cs_range), GFP_KERNEL); if (!lpbfifo.cs_ranges) goto end; - if (of_property_read_u32_array(lb_node, "ranges", - (u32 *)lpbfifo.cs_ranges, proplen) != 0) { - goto end; - } - - for (i = 0; i < lpbfifo.cs_n; i++) { - if (lpbfifo.cs_ranges[i].base != 0) + for_each_of_range(&parser, &range) { + u32 base = lower_32_bits(range.bus_addr); + if (base) goto end; + + lpbfifo.cs_ranges[i].csnum = upper_32_bits(range.bus_addr); + lpbfifo.cs_ranges[i].base = base; + lpbfifo.cs_ranges[i].addr = range.cpu_addr; + lpbfifo.cs_ranges[i].size = range.size; + i++; } ret = 0; @@ -495,7 +477,7 @@ static int mpc512x_lpbfifo_probe(struct platform_device *pdev) return ret; } -static int mpc512x_lpbfifo_remove(struct platform_device *pdev) +static void mpc512x_lpbfifo_remove(struct platform_device *pdev) { unsigned long flags; struct dma_device *dma_dev = lpbfifo.chan->device; @@ -512,8 +494,6 @@ static int mpc512x_lpbfifo_remove(struct platform_device *pdev) free_irq(lpbfifo.irq, &pdev->dev); irq_dispose_mapping(lpbfifo.irq); dma_release_channel(lpbfifo.chan); - - return 0; } static const struct of_device_id mpc512x_lpbfifo_match[] = { diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c index 5ac0ead2540f..8c1f3b629fc7 100644 --- a/arch/powerpc/platforms/512x/mpc512x_shared.c +++ b/arch/powerpc/platforms/512x/mpc512x_shared.c @@ -29,20 +29,6 @@ static struct mpc512x_reset_module __iomem *reset_module_base; -static void __init mpc512x_restart_init(void) -{ - struct device_node *np; - const char *reset_compat; - - reset_compat = mpc512x_select_reset_compat(); - np = of_find_compatible_node(NULL, NULL, reset_compat); - if (!np) - return; - - reset_module_base = of_iomap(np, 0); - of_node_put(np); -} - void __noreturn mpc512x_restart(char *cmd) { if (reset_module_base) { @@ -293,7 +279,7 @@ static void __init mpc512x_setup_diu(void) * and so negatively affect boot time. Instead we reserve the * already configured frame buffer area so that it won't be * destroyed. The starting address of the area to reserve and - * also it's length is passed to memblock_reserve(). It will be + * also its length is passed to memblock_reserve(). It will be * freed later on first open of fbdev, when splash image is not * needed any more. 
*/ @@ -363,7 +349,7 @@ const char *__init mpc512x_select_psc_compat(void) return NULL; } -const char *__init mpc512x_select_reset_compat(void) +static const char *__init mpc512x_select_reset_compat(void) { if (of_machine_is_compatible("fsl,mpc5121")) return "fsl,mpc5121-reset"; @@ -455,6 +441,20 @@ static void __init mpc512x_psc_fifo_init(void) } } +static void __init mpc512x_restart_init(void) +{ + struct device_node *np; + const char *reset_compat; + + reset_compat = mpc512x_select_reset_compat(); + np = of_find_compatible_node(NULL, NULL, reset_compat); + if (!np) + return; + + reset_module_base = of_iomap(np, 0); + of_node_put(np); +} + void __init mpc512x_init_early(void) { mpc512x_restart_init(); diff --git a/arch/powerpc/platforms/512x/pdm360ng.c b/arch/powerpc/platforms/512x/pdm360ng.c index 1e911f42697d..8bbbf78bb42b 100644 --- a/arch/powerpc/platforms/512x/pdm360ng.c +++ b/arch/powerpc/platforms/512x/pdm360ng.c @@ -7,11 +7,12 @@ * PDM360NG board setup */ +#include <linux/device.h> #include <linux/kernel.h> #include <linux/io.h> +#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_fdt.h> -#include <linux/of_platform.h> #include <asm/machdep.h> #include <asm/ipic.h> @@ -100,7 +101,7 @@ static inline void __init pdm360ng_touchscreen_init(void) } #endif /* CONFIG_TOUCHSCREEN_ADS7846 */ -void __init pdm360ng_init(void) +static void __init pdm360ng_init(void) { mpc512x_init(); pdm360ng_touchscreen_init(); @@ -108,9 +109,6 @@ void __init pdm360ng_init(void) static int __init pdm360ng_probe(void) { - if (!of_machine_is_compatible("ifm,pdm360ng")) - return 0; - mpc512x_init_early(); return 1; @@ -118,11 +116,11 @@ static int __init pdm360ng_probe(void) define_machine(pdm360ng) { .name = "PDM360NG", + .compatible = "ifm,pdm360ng", .probe = pdm360ng_probe, .setup_arch = mpc512x_setup_arch, .init = pdm360ng_init, .init_IRQ = mpc512x_init_IRQ, .get_irq = ipic_get_irq, - .calibrate_decr = generic_calibrate_decr, .restart = mpc512x_restart, }; diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig index b72ed2950ca8..384e4bef2c28 100644 --- a/arch/powerpc/platforms/52xx/Kconfig +++ b/arch/powerpc/platforms/52xx/Kconfig @@ -54,8 +54,3 @@ config PPC_MPC5200_BUGFIX for MPC5200B based boards. 
It is safe to say 'Y' here - -config PPC_MPC5200_LPBFIFO - tristate "MPC5200 LocalPlus bus FIFO driver" - depends on PPC_MPC52xx && PPC_BESTCOMM - select PPC_BESTCOMM_GEN_BD diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile index f40d48eab779..1b1f72d83342 100644 --- a/arch/powerpc/platforms/52xx/Makefile +++ b/arch/powerpc/platforms/52xx/Makefile @@ -14,5 +14,3 @@ obj-$(CONFIG_PM) += mpc52xx_sleep.o mpc52xx_pm.o ifdef CONFIG_PPC_LITE5200 obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o endif - -obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c index e0647720ed5e..a7172f9ebaad 100644 --- a/arch/powerpc/platforms/52xx/efika.c +++ b/arch/powerpc/platforms/52xx/efika.c @@ -13,6 +13,7 @@ #include <generated/utsrelease.h> #include <linux/pci.h> #include <linux/of.h> +#include <linux/seq_file.h> #include <asm/dma.h> #include <asm/time.h> #include <asm/machdep.h> @@ -41,7 +42,7 @@ static int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int ret = -1; int rval; - rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len); + rval = rtas_call(rtas_function_token(RTAS_FN_READ_PCI_CONFIG), 2, 2, &ret, addr, len); *val = ret; return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; } @@ -55,7 +56,7 @@ static int rtas_write_config(struct pci_bus *bus, unsigned int devfn, | (hose->global_number << 24); int rval; - rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL, + rval = rtas_call(rtas_function_token(RTAS_FN_WRITE_PCI_CONFIG), 3, 1, NULL, addr, len, val); return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; } @@ -195,8 +196,10 @@ static void __init efika_setup_arch(void) static int __init efika_probe(void) { - const char *model = of_get_property(of_root, "model", NULL); + struct device_node *root = of_find_node_by_path("/"); + const char *model = of_get_property(root, "model", NULL); + of_node_put(root); if (model == NULL) return 0; if (strcmp(model, "EFIKA5K2")) @@ -226,7 +229,6 @@ define_machine(efika) .get_rtc_time = rtas_get_rtc_time, .progress = rtas_progress, .get_boot_time = rtas_get_boot_time, - .calibrate_decr = generic_calibrate_decr, #ifdef CONFIG_PCI .phys_mem_access_prot = pci_phys_mem_access_prot, #endif diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c index 7ea9b6ce0591..0a161d82a3a8 100644 --- a/arch/powerpc/platforms/52xx/lite5200.c +++ b/arch/powerpc/platforms/52xx/lite5200.c @@ -172,22 +172,13 @@ static const char * const board[] __initconst = { NULL, }; -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init lite5200_probe(void) -{ - return of_device_compatible_match(of_root, board); -} - define_machine(lite5200) { .name = "lite5200", - .probe = lite5200_probe, + .compatibles = board, .setup_arch = lite5200_setup_arch, .discover_phbs = mpc52xx_setup_pci, .init = mpc52xx_declare_of_platform_devices, .init_IRQ = mpc52xx_init_irq, .get_irq = mpc52xx_get_irq, .restart = mpc52xx_restart, - .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c index 129313b1d021..4900f5f48cce 100644 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c @@ -47,15 +47,14 @@ static int lite5200_pm_begin(suspend_state_t state) static int lite5200_pm_prepare(void) { struct device_node *np; - const struct 
of_device_id immr_ids[] = { + static const struct of_device_id immr_ids[] = { { .compatible = "fsl,mpc5200-immr", }, { .compatible = "fsl,mpc5200b-immr", }, { .type = "soc", .compatible = "mpc5200", }, /* lite5200 */ { .type = "builtin", .compatible = "mpc5200", }, /* efika */ {} }; - u64 regaddr64 = 0; - const u32 *regaddr_p; + struct resource res; /* deep sleep? let mpc52xx code handle that */ if (lite5200_pm_target_state == PM_SUSPEND_STANDBY) @@ -66,12 +65,10 @@ static int lite5200_pm_prepare(void) /* map registers */ np = of_find_matching_node(NULL, immr_ids); - regaddr_p = of_get_address(np, 0, NULL, NULL); - if (regaddr_p) - regaddr64 = of_translate_address(np, regaddr_p); + of_address_to_resource(np, 0, &res); of_node_put(np); - mbar = ioremap((u32) regaddr64, 0xC000); + mbar = ioremap(res.start, 0xC000); if (!mbar) { printk(KERN_ERR "%s:%i Error mapping registers\n", __func__, __LINE__); return -ENOSYS; diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S index afee8b1515a8..0ec2522ee4ad 100644 --- a/arch/powerpc/platforms/52xx/lite5200_sleep.S +++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S @@ -1,4 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h> + #include <asm/reg.h> #include <asm/ppc_asm.h> #include <asm/processor.h> @@ -178,7 +180,8 @@ sram_code: /* local udelay in sram is needed */ - udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */ +SYM_FUNC_START_LOCAL(udelay) + /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */ mullw r12, r12, r11 mftb r13 /* start */ add r12, r13, r12 /* end */ @@ -187,6 +190,7 @@ sram_code: cmp cr0, r13, r12 blt 1b blr +SYM_FUNC_END(udelay) sram_code_end: @@ -199,7 +203,8 @@ lite5200_wakeup: /* HIDs, MSR */ LOAD_SPRN(HID1, 0x19) - LOAD_SPRN(HID2, 0x1a) + /* FIXME: Should this use HID2_G2_LE? */ + LOAD_SPRN(HID2_750FX, 0x1a) /* address translation is tricky (see turn_on_mmu) */ @@ -271,7 +276,7 @@ _ASM_NOKPROBE_SYMBOL(lite5200_wakeup) SAVE_SR(n+2, addr+2); \ SAVE_SR(n+3, addr+3); -save_regs: +SYM_FUNC_START_LOCAL(save_regs) stw r0, 0(r4) stw r1, 0x4(r4) stw r2, 0x8(r4) @@ -279,7 +284,8 @@ save_regs: SAVE_SPRN(HID0, 0x18) SAVE_SPRN(HID1, 0x19) - SAVE_SPRN(HID2, 0x1a) + /* FIXME: Should this use HID2_G2_LE? */ + SAVE_SPRN(HID2_750FX, 0x1a) mfmsr r10 stw r10, (4*0x1b)(r4) /*SAVE_SPRN(LR, 0x1c) have to save it before the call */ @@ -317,6 +323,7 @@ save_regs: SAVE_SPRN(TBRU, 0x5b) blr +SYM_FUNC_END(save_regs) /* restore registers */ @@ -336,7 +343,7 @@ save_regs: LOAD_SR(n+2, addr+2); \ LOAD_SR(n+3, addr+3); -restore_regs: +SYM_FUNC_START_LOCAL(restore_regs) lis r4, registers@h ori r4, r4, registers@l @@ -393,6 +400,7 @@ restore_regs: blr _ASM_NOKPROBE_SYMBOL(restore_regs) +SYM_FUNC_END(restore_regs) @@ -403,7 +411,7 @@ _ASM_NOKPROBE_SYMBOL(restore_regs) * Flush data cache * Do this by just reading lots of stuff into the cache. 
*/ -flush_data_cache: +SYM_FUNC_START_LOCAL(flush_data_cache) lis r3,CONFIG_KERNEL_START@h ori r3,r3,CONFIG_KERNEL_START@l li r4,NUM_CACHE_LINES @@ -413,3 +421,4 @@ flush_data_cache: addi r3,r3,L1_CACHE_BYTES /* Next line, please */ bdnz 1b blr +SYM_FUNC_END(flush_data_cache) diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index ee367ff3ec8a..bc7f83cfec1d 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -168,12 +168,14 @@ static void __init media5200_init_irq(void) spin_lock_init(&media5200_irq.lock); - media5200_irq.irqhost = irq_domain_add_linear(fpga_np, + media5200_irq.irqhost = irq_domain_create_linear(of_fwnode_handle(fpga_np), MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq); if (!media5200_irq.irqhost) goto out; pr_debug("%s: allocated irqhost\n", __func__); + of_node_put(fpga_np); + irq_set_handler_data(cascade_virq, &media5200_irq); irq_set_chained_handler(cascade_virq, media5200_irq_cascade); @@ -181,6 +183,7 @@ static void __init media5200_init_irq(void) out: pr_err("Could not find Media5200 FPGA; PCI interrupts will not work\n"); + of_node_put(fpga_np); } /* @@ -224,28 +227,13 @@ static void __init media5200_setup_arch(void) } -/* list of the supported boards */ -static const char * const board[] __initconst = { - "fsl,media5200", - NULL -}; - -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init media5200_probe(void) -{ - return of_device_compatible_match(of_root, board); -} - define_machine(media5200_platform) { .name = "media5200-platform", - .probe = media5200_probe, + .compatible = "fsl,media5200", .setup_arch = media5200_setup_arch, .discover_phbs = mpc52xx_setup_pci, .init = mpc52xx_declare_of_platform_devices, .init_IRQ = media5200_init_irq, .get_irq = mpc52xx_get_irq, .restart = mpc52xx_restart, - .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c index cc349d579061..7e0e4c34a40b 100644 --- a/arch/powerpc/platforms/52xx/mpc5200_simple.c +++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c @@ -59,22 +59,13 @@ static const char *board[] __initdata = { NULL }; -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init mpc5200_simple_probe(void) -{ - return of_device_compatible_match(of_root, board); -} - define_machine(mpc5200_simple_platform) { .name = "mpc5200-simple-platform", - .probe = mpc5200_simple_probe, + .compatibles = board, .setup_arch = mpc5200_simple_setup_arch, .discover_phbs = mpc52xx_setup_pci, .init = mpc52xx_declare_of_platform_devices, .init_IRQ = mpc52xx_init_irq, .get_irq = mpc52xx_get_irq, .restart = mpc52xx_restart, - .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c index 4348506d667d..253421ffb4e5 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_common.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c @@ -12,12 +12,10 @@ #undef DEBUG -#include <linux/gpio.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/of_address.h> #include <linux/of_platform.h> -#include <linux/of_gpio.h> #include <linux/export.h> #include <asm/io.h> #include <asm/mpc52xx.h> @@ -141,8 +139,8 @@ mpc52xx_map_common_devices(void) * on a gpt0, so check has-wdt property before mapping. 
*/ for_each_matching_node(np, mpc52xx_gpt_ids) { - if (of_get_property(np, "fsl,has-wdt", NULL) || - of_get_property(np, "has-wdt", NULL)) { + if (of_property_read_bool(np, "fsl,has-wdt") || + of_property_read_bool(np, "has-wdt")) { mpc52xx_wdt = of_iomap(np, 0); of_node_put(np); break; @@ -204,43 +202,6 @@ int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv) EXPORT_SYMBOL(mpc52xx_set_psc_clkdiv); /** - * mpc52xx_get_xtal_freq - Get SYS_XTAL_IN frequency for a device - * - * @node: device node - * - * Returns the frequency of the external oscillator clock connected - * to the SYS_XTAL_IN pin, or 0 if it cannot be determined. - */ -unsigned int mpc52xx_get_xtal_freq(struct device_node *node) -{ - u32 val; - unsigned int freq; - - if (!mpc52xx_cdm) - return 0; - - freq = mpc5xxx_get_bus_frequency(node); - if (!freq) - return 0; - - if (in_8(&mpc52xx_cdm->ipb_clk_sel) & 0x1) - freq *= 2; - - val = in_be32(&mpc52xx_cdm->rstcfg); - if (val & (1 << 5)) - freq *= 8; - else - freq *= 4; - if (val & (1 << 6)) - freq /= 12; - else - freq /= 16; - - return freq; -} -EXPORT_SYMBOL(mpc52xx_get_xtal_freq); - -/** * mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer */ void __noreturn mpc52xx_restart(char *cmd) diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 968f5b727273..bda707d848a6 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -48,7 +48,7 @@ * the output mode. This driver does not change the output mode setting. */ -#include <linux/device.h> +#include <linux/gpio/driver.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -57,9 +57,9 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> -#include <linux/of_gpio.h> +#include <linux/platform_device.h> #include <linux/kernel.h> +#include <linux/property.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/watchdog.h> @@ -247,9 +247,9 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) if (!cascade_virq) return; - gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt); + gpt->irqhost = irq_domain_create_linear(of_fwnode_handle(node), 1, &mpc52xx_gpt_irq_ops, gpt); if (!gpt->irqhost) { - dev_err(gpt->dev, "irq_domain_add_linear() failed\n"); + dev_err(gpt->dev, "irq_domain_create_linear() failed\n"); return; } @@ -280,7 +280,7 @@ static int mpc52xx_gpt_gpio_get(struct gpio_chip *gc, unsigned int gpio) return (in_be32(&gpt->regs->status) >> 8) & 1; } -static void +static int mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int v) { struct mpc52xx_gpt_priv *gpt = gpiochip_get_data(gc); @@ -293,6 +293,8 @@ mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int v) raw_spin_lock_irqsave(&gpt->lock, flags); clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_GPIO_MASK, r); raw_spin_unlock_irqrestore(&gpt->lock, flags); + + return 0; } static int mpc52xx_gpt_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) @@ -316,17 +318,15 @@ mpc52xx_gpt_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) return 0; } -static void -mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) +static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt) { int rc; - /* Only setup GPIO if the device tree claims the GPT is - * a GPIO controller */ - if (!of_find_property(node, "gpio-controller", NULL)) + /* Only setup GPIO if the device claims 
the GPT is a GPIO controller */ + if (!device_property_present(gpt->dev, "gpio-controller")) return; - gpt->gc.label = kasprintf(GFP_KERNEL, "%pOF", node); + gpt->gc.label = kasprintf(GFP_KERNEL, "%pfw", dev_fwnode(gpt->dev)); if (!gpt->gc.label) { dev_err(gpt->dev, "out of memory\n"); return; @@ -336,9 +336,9 @@ mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) gpt->gc.direction_input = mpc52xx_gpt_gpio_dir_in; gpt->gc.direction_output = mpc52xx_gpt_gpio_dir_out; gpt->gc.get = mpc52xx_gpt_gpio_get; - gpt->gc.set = mpc52xx_gpt_gpio_set; + gpt->gc.set_rv = mpc52xx_gpt_gpio_set; gpt->gc.base = -1; - gpt->gc.of_node = node; + gpt->gc.parent = gpt->dev; /* Setup external pin in GPIO mode */ clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_MS_MASK, @@ -351,8 +351,7 @@ mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) dev_dbg(gpt->dev, "%s() complete.\n", __func__); } #else /* defined(CONFIG_GPIOLIB) */ -static void -mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *p, struct device_node *np) { } +static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt) { } #endif /* defined(CONFIG_GPIOLIB) */ /*********************************************************************** @@ -372,7 +371,7 @@ struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq) mutex_lock(&mpc52xx_gpt_list_mutex); list_for_each(pos, &mpc52xx_gpt_list) { gpt = container_of(pos, struct mpc52xx_gpt_priv, list); - if (gpt->irqhost && irq == irq_linear_revmap(gpt->irqhost, 0)) { + if (gpt->irqhost && irq == irq_find_mapping(gpt->irqhost, 0)) { mutex_unlock(&mpc52xx_gpt_list_mutex); return gpt; } @@ -647,7 +646,6 @@ static int mpc52xx_wdt_release(struct inode *inode, struct file *file) static const struct file_operations mpc52xx_wdt_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .write = mpc52xx_wdt_write, .unlocked_ioctl = mpc52xx_wdt_ioctl, .compat_ioctl = compat_ptr_ioctl, @@ -722,14 +720,14 @@ static int mpc52xx_gpt_probe(struct platform_device *ofdev) raw_spin_lock_init(&gpt->lock); gpt->dev = &ofdev->dev; - gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node); + gpt->ipb_freq = mpc5xxx_get_bus_frequency(&ofdev->dev); gpt->regs = of_iomap(ofdev->dev.of_node, 0); if (!gpt->regs) return -ENOMEM; dev_set_drvdata(&ofdev->dev, gpt); - mpc52xx_gpt_gpio_setup(gpt, ofdev->dev.of_node); + mpc52xx_gpt_gpio_setup(gpt); mpc52xx_gpt_irq_setup(gpt, ofdev->dev.of_node); mutex_lock(&mpc52xx_gpt_list_mutex); @@ -737,8 +735,8 @@ static int mpc52xx_gpt_probe(struct platform_device *ofdev) mutex_unlock(&mpc52xx_gpt_list_mutex); /* check if this device could be a watchdog */ - if (of_get_property(ofdev->dev.of_node, "fsl,has-wdt", NULL) || - of_get_property(ofdev->dev.of_node, "has-wdt", NULL)) { + if (of_property_read_bool(ofdev->dev.of_node, "fsl,has-wdt") || + of_property_read_bool(ofdev->dev.of_node, "has-wdt")) { const u32 *on_boot_wdt; gpt->wdt_mode = MPC52xx_GPT_CAN_WDT; @@ -755,11 +753,6 @@ static int mpc52xx_gpt_probe(struct platform_device *ofdev) return 0; } -static int mpc52xx_gpt_remove(struct platform_device *ofdev) -{ - return -EBUSY; -} - static const struct of_device_id mpc52xx_gpt_match[] = { { .compatible = "fsl,mpc5200-gpt", }, @@ -772,10 +765,10 @@ static const struct of_device_id mpc52xx_gpt_match[] = { static struct platform_driver mpc52xx_gpt_driver = { .driver = { .name = "mpc52xx-gpt", + .suppress_bind_attrs = true, .of_match_table = mpc52xx_gpt_match, }, .probe = mpc52xx_gpt_probe, - .remove = mpc52xx_gpt_remove, }; static int __init 
mpc52xx_gpt_init(void) diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c deleted file mode 100644 index 48038aaedbd3..000000000000 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ /dev/null @@ -1,581 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * LocalPlus Bus FIFO driver for the Freescale MPC52xx. - * - * Copyright (C) 2009 Secret Lab Technologies Ltd. - * - * Todo: - * - Add support for multiple requests to be queued. - */ - -#include <linux/interrupt.h> -#include <linux/kernel.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/of_platform.h> -#include <linux/spinlock.h> -#include <linux/module.h> -#include <asm/io.h> -#include <asm/mpc52xx.h> -#include <asm/time.h> - -#include <linux/fsl/bestcomm/bestcomm.h> -#include <linux/fsl/bestcomm/bestcomm_priv.h> -#include <linux/fsl/bestcomm/gen_bd.h> - -MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); -MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver"); -MODULE_LICENSE("GPL"); - -#define LPBFIFO_REG_PACKET_SIZE (0x00) -#define LPBFIFO_REG_START_ADDRESS (0x04) -#define LPBFIFO_REG_CONTROL (0x08) -#define LPBFIFO_REG_ENABLE (0x0C) -#define LPBFIFO_REG_BYTES_DONE_STATUS (0x14) -#define LPBFIFO_REG_FIFO_DATA (0x40) -#define LPBFIFO_REG_FIFO_STATUS (0x44) -#define LPBFIFO_REG_FIFO_CONTROL (0x48) -#define LPBFIFO_REG_FIFO_ALARM (0x4C) - -struct mpc52xx_lpbfifo { - struct device *dev; - phys_addr_t regs_phys; - void __iomem *regs; - int irq; - spinlock_t lock; - - struct bcom_task *bcom_tx_task; - struct bcom_task *bcom_rx_task; - struct bcom_task *bcom_cur_task; - - /* Current state data */ - struct mpc52xx_lpbfifo_request *req; - int dma_irqs_enabled; -}; - -/* The MPC5200 has only one fifo, so only need one instance structure */ -static struct mpc52xx_lpbfifo lpbfifo; - -/** - * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred - */ -static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) -{ - size_t transfer_size = req->size - req->pos; - struct bcom_bd *bd; - void __iomem *reg; - u32 *data; - int i; - int bit_fields; - int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); - int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; - int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA; - - /* Set and clear the reset bits; is good practice in User Manual */ - out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); - - /* set master enable bit */ - out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001); - if (!dma) { - /* While the FIFO can be setup for transfer sizes as large as - * 16M-1, the FIFO itself is only 512 bytes deep and it does - * not generate interrupts for FIFO full events (only transfer - * complete will raise an IRQ). Therefore when not using - * Bestcomm to drive the FIFO it needs to either be polled, or - * transfers need to constrained to the size of the fifo. - * - * This driver restricts the size of the transfer - */ - if (transfer_size > 512) - transfer_size = 512; - - /* Load the FIFO with data */ - if (write) { - reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA; - data = req->data + req->pos; - for (i = 0; i < transfer_size; i += 4) - out_be32(reg, *data++); - } - - /* Unmask both error and completion irqs */ - out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301); - } else { - /* Choose the correct direction - * - * Configure the watermarks so DMA will always complete correctly. 
- * It may be worth experimenting with the ALARM value to see if - * there is a performance impact. However, if it is wrong there - * is a risk of DMA not transferring the last chunk of data - */ - if (write) { - out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4); - out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7); - lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task; - } else { - out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff); - out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0); - lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task; - - if (poll_dma) { - if (lpbfifo.dma_irqs_enabled) { - disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task)); - lpbfifo.dma_irqs_enabled = 0; - } - } else { - if (!lpbfifo.dma_irqs_enabled) { - enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task)); - lpbfifo.dma_irqs_enabled = 1; - } - } - } - - bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task); - bd->status = transfer_size; - if (!write) { - /* - * In the DMA read case, the DMA doesn't complete, - * possibly due to incorrect watermarks in the ALARM - * and CONTROL regs. For now instead of trying to - * determine the right watermarks that will make this - * work, just increase the number of bytes the FIFO is - * expecting. - * - * When submitting another operation, the FIFO will get - * reset, so the condition of the FIFO waiting for a - * non-existent 4 bytes will get cleared. - */ - transfer_size += 4; /* BLECH! */ - } - bd->data[0] = req->data_phys + req->pos; - bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL); - - /* error irq & master enabled bit */ - bit_fields = 0x00000201; - - /* Unmask irqs */ - if (write && (!poll_dma)) - bit_fields |= 0x00000100; /* completion irq too */ - out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields); - } - - /* Set transfer size, width, chip select and READ mode */ - out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS, - req->offset + req->pos); - out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size); - - bit_fields = req->cs << 24 | 0x000008; - if (!write) - bit_fields |= 0x010000; /* read mode */ - out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields); - - /* Kick it off */ - if (!lpbfifo.req->defer_xfer_start) - out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01); - if (dma) - bcom_enable(lpbfifo.bcom_cur_task); -} - -/** - * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO - * - * On transmit, the dma completion irq triggers before the fifo completion - * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm - * task completion irq because everything is not really done until the LPB FIFO - * completion irq triggers. - * - * In other words: - * For DMA, on receive, the "Fat Lady" is the bestcom completion irq. on - * transmit, the fifo completion irq is the "Fat Lady". The opera (or in this - * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings. - * - * Reasons for entering this routine: - * 1) PIO mode rx and tx completion irq - * 2) DMA interrupt mode tx completion irq - * 3) DMA polled mode tx - * - * Exit conditions: - * 1) Transfer aborted - * 2) FIFO complete without DMA; more data to do - * 3) FIFO complete without DMA; all data transferred - * 4) FIFO complete using DMA - * - * Condition 1 can occur regardless of whether or not DMA is used. - * It requires executing the callback to report the error and exiting - * immediately. 
- * - * Condition 2 requires programming the FIFO with the next block of data - * - * Condition 3 requires executing the callback to report completion - * - * Condition 4 means the same as 3, except that we also retrieve the bcom - * buffer so DMA doesn't get clogged up. - * - * To make things trickier, the spinlock must be dropped before - * executing the callback, otherwise we could end up with a deadlock - * or nested spinlock condition. The out path is non-trivial, so - * extra fiddling is done to make sure all paths lead to the same - * outbound code. - */ -static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id) -{ - struct mpc52xx_lpbfifo_request *req; - u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS); - void __iomem *reg; - u32 *data; - int count, i; - int do_callback = 0; - u32 ts; - unsigned long flags; - int dma, write, poll_dma; - - spin_lock_irqsave(&lpbfifo.lock, flags); - ts = mftb(); - - req = lpbfifo.req; - if (!req) { - spin_unlock_irqrestore(&lpbfifo.lock, flags); - pr_err("bogus LPBFIFO IRQ\n"); - return IRQ_HANDLED; - } - - dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); - write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; - poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA; - - if (dma && !write) { - spin_unlock_irqrestore(&lpbfifo.lock, flags); - pr_err("bogus LPBFIFO IRQ (dma and not writing)\n"); - return IRQ_HANDLED; - } - - if ((status & 0x01) == 0) { - goto out; - } - - /* check abort bit */ - if (status & 0x10) { - out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); - do_callback = 1; - goto out; - } - - /* Read result from hardware */ - count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS); - count &= 0x00ffffff; - - if (!dma && !write) { - /* copy the data out of the FIFO */ - reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA; - data = req->data + req->pos; - for (i = 0; i < count; i += 4) - *data++ = in_be32(reg); - } - - /* Update transfer position and count */ - req->pos += count; - - /* Decide what to do next */ - if (req->size - req->pos) - mpc52xx_lpbfifo_kick(req); /* more work to do */ - else - do_callback = 1; - - out: - /* Clear the IRQ */ - out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01); - - if (dma && (status & 0x11)) { - /* - * Count the DMA as complete only when the FIFO completion - * status or abort bits are set. - * - * (status & 0x01) should always be the case except sometimes - * when using polled DMA. - * - * (status & 0x10) {transfer aborted}: This case needs more - * testing. - */ - bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL); - } - req->last_byte = ((u8 *)req->data)[req->size - 1]; - - /* When the do_callback flag is set; it means the transfer is finished - * so set the FIFO as idle */ - if (do_callback) - lpbfifo.req = NULL; - - if (irq != 0) /* don't increment on polled case */ - req->irq_count++; - - req->irq_ticks += mftb() - ts; - spin_unlock_irqrestore(&lpbfifo.lock, flags); - - /* Spinlock is released; it is now safe to call the callback */ - if (do_callback && req->callback) - req->callback(req); - - return IRQ_HANDLED; -} - -/** - * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task - * - * Only used when receiving data. 
- */ -static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id) -{ - struct mpc52xx_lpbfifo_request *req; - unsigned long flags; - u32 status; - u32 ts; - - spin_lock_irqsave(&lpbfifo.lock, flags); - ts = mftb(); - - req = lpbfifo.req; - if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) { - spin_unlock_irqrestore(&lpbfifo.lock, flags); - return IRQ_HANDLED; - } - - if (irq != 0) /* don't increment on polled case */ - req->irq_count++; - - if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) { - spin_unlock_irqrestore(&lpbfifo.lock, flags); - - req->buffer_not_done_cnt++; - if ((req->buffer_not_done_cnt % 1000) == 0) - pr_err("transfer stalled\n"); - - return IRQ_HANDLED; - } - - bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL); - - req->last_byte = ((u8 *)req->data)[req->size - 1]; - - req->pos = status & 0x00ffffff; - - /* Mark the FIFO as idle */ - lpbfifo.req = NULL; - - /* Release the lock before calling out to the callback. */ - req->irq_ticks += mftb() - ts; - spin_unlock_irqrestore(&lpbfifo.lock, flags); - - if (req->callback) - req->callback(req); - - return IRQ_HANDLED; -} - -/** - * mpc52xx_lpbfifo_bcom_poll - Poll for DMA completion - */ -void mpc52xx_lpbfifo_poll(void) -{ - struct mpc52xx_lpbfifo_request *req = lpbfifo.req; - int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); - int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; - - /* - * For more information, see comments on the "Fat Lady" - */ - if (dma && write) - mpc52xx_lpbfifo_irq(0, NULL); - else - mpc52xx_lpbfifo_bcom_irq(0, NULL); -} -EXPORT_SYMBOL(mpc52xx_lpbfifo_poll); - -/** - * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request. - * @req: Pointer to request structure - */ -int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req) -{ - unsigned long flags; - - if (!lpbfifo.regs) - return -ENODEV; - - spin_lock_irqsave(&lpbfifo.lock, flags); - - /* If the req pointer is already set, then a transfer is in progress */ - if (lpbfifo.req) { - spin_unlock_irqrestore(&lpbfifo.lock, flags); - return -EBUSY; - } - - /* Setup the transfer */ - lpbfifo.req = req; - req->irq_count = 0; - req->irq_ticks = 0; - req->buffer_not_done_cnt = 0; - req->pos = 0; - - mpc52xx_lpbfifo_kick(req); - spin_unlock_irqrestore(&lpbfifo.lock, flags); - return 0; -} -EXPORT_SYMBOL(mpc52xx_lpbfifo_submit); - -int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req) -{ - unsigned long flags; - - if (!lpbfifo.regs) - return -ENODEV; - - spin_lock_irqsave(&lpbfifo.lock, flags); - - /* - * If the req pointer is already set and a transfer was - * started on submit, then this transfer is in progress - */ - if (lpbfifo.req && !lpbfifo.req->defer_xfer_start) { - spin_unlock_irqrestore(&lpbfifo.lock, flags); - return -EBUSY; - } - - /* - * If the req was previously submitted but not - * started, start it now - */ - if (lpbfifo.req && lpbfifo.req == req && - lpbfifo.req->defer_xfer_start) { - out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01); - } - - spin_unlock_irqrestore(&lpbfifo.lock, flags); - return 0; -} -EXPORT_SYMBOL(mpc52xx_lpbfifo_start_xfer); - -void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req) -{ - unsigned long flags; - - spin_lock_irqsave(&lpbfifo.lock, flags); - if (lpbfifo.req == req) { - /* Put it into reset and clear the state */ - bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task); - bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task); - out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); - lpbfifo.req = NULL; - } - spin_unlock_irqrestore(&lpbfifo.lock, flags); -} 
-EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); - -static int mpc52xx_lpbfifo_probe(struct platform_device *op) -{ - struct resource res; - int rc = -ENOMEM; - - if (lpbfifo.dev != NULL) - return -ENOSPC; - - lpbfifo.irq = irq_of_parse_and_map(op->dev.of_node, 0); - if (!lpbfifo.irq) - return -ENODEV; - - if (of_address_to_resource(op->dev.of_node, 0, &res)) - return -ENODEV; - lpbfifo.regs_phys = res.start; - lpbfifo.regs = of_iomap(op->dev.of_node, 0); - if (!lpbfifo.regs) - return -ENOMEM; - - spin_lock_init(&lpbfifo.lock); - - /* Put FIFO into reset */ - out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); - - /* Register the interrupt handler */ - rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0, - "mpc52xx-lpbfifo", &lpbfifo); - if (rc) - goto err_irq; - - /* Request the Bestcomm receive (fifo --> memory) task and IRQ */ - lpbfifo.bcom_rx_task = - bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, - BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC, - 16*1024*1024); - if (!lpbfifo.bcom_rx_task) - goto err_bcom_rx; - - rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), - mpc52xx_lpbfifo_bcom_irq, 0, - "mpc52xx-lpbfifo-rx", &lpbfifo); - if (rc) - goto err_bcom_rx_irq; - - lpbfifo.dma_irqs_enabled = 1; - - /* Request the Bestcomm transmit (memory --> fifo) task and IRQ */ - lpbfifo.bcom_tx_task = - bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, - BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC); - if (!lpbfifo.bcom_tx_task) - goto err_bcom_tx; - - lpbfifo.dev = &op->dev; - return 0; - - err_bcom_tx: - free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo); - err_bcom_rx_irq: - bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); - err_bcom_rx: - err_irq: - iounmap(lpbfifo.regs); - lpbfifo.regs = NULL; - - dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n"); - return -ENODEV; -} - - -static int mpc52xx_lpbfifo_remove(struct platform_device *op) -{ - if (lpbfifo.dev != &op->dev) - return 0; - - /* Put FIFO in reset */ - out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); - - /* Release the bestcomm transmit task */ - free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo); - bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task); - - /* Release the bestcomm receive task */ - free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo); - bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); - - free_irq(lpbfifo.irq, &lpbfifo); - iounmap(lpbfifo.regs); - lpbfifo.regs = NULL; - lpbfifo.dev = NULL; - - return 0; -} - -static const struct of_device_id mpc52xx_lpbfifo_match[] = { - { .compatible = "fsl,mpc5200-lpbfifo", }, - {}, -}; -MODULE_DEVICE_TABLE(of, mpc52xx_lpbfifo_match); - -static struct platform_driver mpc52xx_lpbfifo_driver = { - .driver = { - .name = "mpc52xx-lpbfifo", - .of_match_table = mpc52xx_lpbfifo_match, - }, - .probe = mpc52xx_lpbfifo_probe, - .remove = mpc52xx_lpbfifo_remove, -}; -module_platform_driver(mpc52xx_lpbfifo_driver); diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c index 859e2818c43d..0ca4401ba781 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c @@ -327,14 +327,13 @@ mpc52xx_pci_setup(struct pci_controller *hose, static void mpc52xx_pci_fixup_resources(struct pci_dev *dev) { - int i; + struct resource *res; pr_debug("%s() %.4x:%.4x\n", __func__, dev->vendor, dev->device); /* We don't rely on boot loader for PCI and resets all devices */ - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { - struct resource *res = &dev->resource[i]; + pci_dev_for_each_resource(dev, res) { if 
(res->end > res->start) { /* Only valid resources */ res->end -= res->start; res->start = 0; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 1e0a5e9644dc..eb6a4e745c08 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -446,14 +446,14 @@ void __init mpc52xx_init_irq(void) * As last step, add an irq host to translate the real * hw irq information provided by the ofw to linux virq */ - mpc52xx_irqhost = irq_domain_add_linear(picnode, + mpc52xx_irqhost = irq_domain_create_linear(of_fwnode_handle(picnode), MPC52xx_IRQ_HIGHTESTHWIRQ, &mpc52xx_irqhost_ops, NULL); if (!mpc52xx_irqhost) panic(__FILE__ ": Cannot allocate the IRQ host\n"); - irq_set_default_host(mpc52xx_irqhost); + irq_set_default_domain(mpc52xx_irqhost); pr_info("MPC52xx PIC is up and running!\n"); } @@ -515,5 +515,5 @@ unsigned int mpc52xx_get_irq(void) return 0; } - return irq_linear_revmap(mpc52xx_irqhost, irq); + return irq_find_mapping(mpc52xx_irqhost, irq); } diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c index 549b3629e39a..f0c31ae15da5 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c @@ -60,7 +60,7 @@ int mpc52xx_set_wakeup_gpio(u8 pin, u8 level) int mpc52xx_pm_prepare(void) { struct device_node *np; - const struct of_device_id immr_ids[] = { + static const struct of_device_id immr_ids[] = { { .compatible = "fsl,mpc5200-immr", }, { .compatible = "fsl,mpc5200b-immr", }, { .type = "soc", .compatible = "mpc5200", }, /* lite5200 */ diff --git a/arch/powerpc/platforms/82xx/Kconfig b/arch/powerpc/platforms/82xx/Kconfig index 1af81de1c4e6..1824536cf6f2 100644 --- a/arch/powerpc/platforms/82xx/Kconfig +++ b/arch/powerpc/platforms/82xx/Kconfig @@ -2,35 +2,14 @@ menuconfig PPC_82xx bool "82xx-based boards (PQ II)" depends on PPC_BOOK3S_32 - -if PPC_82xx - -config MPC8272_ADS - bool "Freescale MPC8272 ADS" - select DEFAULT_UIMAGE - select PQ2ADS - select 8272 - select 8260 select FSL_SOC - select PQ2_ADS_PCI_PIC if PCI - help - This option enables support for the MPC8272 ADS board -config PQ2FADS - bool "Freescale PQ2FADS" - select DEFAULT_UIMAGE - select PQ2ADS - select 8260 - select FSL_SOC - select PQ2_ADS_PCI_PIC if PCI - help - This option enables support for the PQ2FADS board +if PPC_82xx config EP8248E bool "Embedded Planet EP8248E (a.k.a. CWH-PPC-8248N-VE)" - select 8272 - select 8260 - select FSL_SOC + select CPM2 + select PPC_INDIRECT_PCI if PCI select PHYLIB if NETDEVICES select MDIO_BITBANG if PHYLIB help @@ -41,32 +20,9 @@ config EP8248E config MGCOGE bool "Keymile MGCOGE" - select 8272 - select 8260 - select FSL_SOC + select CPM2 + select PPC_INDIRECT_PCI if PCI help This enables support for the Keymile MGCOGE board. endif - -config PQ2ADS - bool - -config 8260 - bool - depends on PPC_BOOK3S_32 - select CPM2 - help - The MPC8260 is a typical embedded CPU made by Freescale. Selecting - this option means that you wish to build a kernel for a machine with - an 8260 class CPU. - -config 8272 - bool - select 8260 - help - The MPC8272 CPM has a different internal dpram setup than other CPM2 - devices - -config PQ2_ADS_PCI_PIC - bool diff --git a/arch/powerpc/platforms/82xx/Makefile b/arch/powerpc/platforms/82xx/Makefile index 8d713c601bf2..4fa43a5cd582 100644 --- a/arch/powerpc/platforms/82xx/Makefile +++ b/arch/powerpc/platforms/82xx/Makefile @@ -2,9 +2,6 @@ # # Makefile for the PowerPC 82xx linux kernel. 
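
[Editor's note] The PIC hunks above (media5200, mpc52xx-gpt, mpc52xx-pic) all apply the same irqdomain API conversion: the fwnode-based constructor replaces the device_node-based one, irq_set_default_host() becomes irq_set_default_domain(), and irq_linear_revmap() becomes irq_find_mapping(). The sketch below only restates that shape with illustrative names (the example_* identifiers are not from this patch, and the ops table is elided):

    #include <linux/init.h>
    #include <linux/irqdomain.h>
    #include <linux/of.h>

    static const struct irq_domain_ops example_ops;   /* .map etc. elided in this sketch */
    static struct irq_domain *example_host;

    static void __init example_pic_init(struct device_node *np)
    {
            /* was: irq_domain_add_linear(np, 64, &example_ops, NULL) */
            example_host = irq_domain_create_linear(of_fwnode_handle(np), 64,
                                                    &example_ops, NULL);
            if (!example_host)
                    return;

            irq_set_default_domain(example_host);      /* was: irq_set_default_host() */
    }

    static unsigned int example_get_irq(irq_hw_number_t hwirq)
    {
            return irq_find_mapping(example_host, hwirq);   /* was: irq_linear_revmap() */
    }
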
# -obj-$(CONFIG_MPC8272_ADS) += mpc8272_ads.o obj-$(CONFIG_CPM2) += pq2.o -obj-$(CONFIG_PQ2_ADS_PCI_PIC) += pq2ads-pci-pic.o -obj-$(CONFIG_PQ2FADS) += pq2fads.o obj-$(CONFIG_EP8248E) += ep8248e.o obj-$(CONFIG_MGCOGE) += km82xx.o diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 28e627f8a320..8f918916e631 100644 --- a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c @@ -13,13 +13,13 @@ #include <linux/of_mdio.h> #include <linux/slab.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <asm/io.h> #include <asm/cpm2.h> #include <asm/udbg.h> #include <asm/machdep.h> #include <asm/time.h> -#include <asm/mpc8260.h> #include <sysdev/fsl_soc.h> #include <sysdev/cpm2_pic.h> @@ -128,7 +128,7 @@ static int ep8248e_mdio_probe(struct platform_device *ofdev) bus->name = "ep8248e-mdio-bitbang"; bus->parent = &ofdev->dev; - snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); + snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start); ret = of_mdiobus_register(bus, ofdev->dev.of_node); if (ret) @@ -140,12 +140,6 @@ err_free_bus: return ret; } -static int ep8248e_mdio_remove(struct platform_device *ofdev) -{ - BUG(); - return 0; -} - static const struct of_device_id ep8248e_mdio_match[] = { { .compatible = "fsl,ep8248e-mdio-bitbang", @@ -157,9 +151,9 @@ static struct platform_driver ep8248e_mdio_driver = { .driver = { .name = "ep8248e-mdio-bitbang", .of_match_table = ep8248e_mdio_match, + .suppress_bind_attrs = true, }, .probe = ep8248e_mdio_probe, - .remove = ep8248e_mdio_remove, }; struct cpm_pin { @@ -301,22 +295,13 @@ static int __init declare_of_platform_devices(void) } machine_device_initcall(ep8248e, declare_of_platform_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init ep8248e_probe(void) -{ - return of_machine_is_compatible("fsl,ep8248e"); -} - define_machine(ep8248e) { .name = "Embedded Planet EP8248E", - .probe = ep8248e_probe, + .compatible = "fsl,ep8248e", .setup_arch = ep8248e_setup_arch, .init_IRQ = ep8248e_pic_init, .get_irq = cpm2_get_irq, - .calibrate_decr = generic_calibrate_decr, .restart = pq2_restart, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c index 1c8bbf4251d9..99f0f0f41876 100644 --- a/arch/powerpc/platforms/82xx/km82xx.c +++ b/arch/powerpc/platforms/82xx/km82xx.c @@ -19,7 +19,6 @@ #include <asm/udbg.h> #include <asm/machdep.h> #include <linux/time.h> -#include <asm/mpc8260.h> #include <sysdev/fsl_soc.h> #include <sysdev/cpm2_pic.h> @@ -28,15 +27,15 @@ static void __init km82xx_pic_init(void) { - struct device_node *np = of_find_compatible_node(NULL, NULL, - "fsl,pq2-pic"); + struct device_node *np __free(device_node); + np = of_find_compatible_node(NULL, NULL, "fsl,pq2-pic"); + if (!np) { pr_err("PIC init: can not find cpm-pic node\n"); return; } cpm2_pic_init(np); - of_node_put(np); } struct cpm_pin { @@ -188,22 +187,13 @@ static int __init declare_of_platform_devices(void) } machine_device_initcall(km82xx, declare_of_platform_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init km82xx_probe(void) -{ - return of_machine_is_compatible("keymile,km82xx"); -} - define_machine(km82xx) { .name = "Keymile km82xx", - .probe = km82xx_probe, + .compatible = "keymile,km82xx", .setup_arch = km82xx_setup_arch, .init_IRQ = km82xx_pic_init, .get_irq = cpm2_get_irq, - .calibrate_decr = generic_calibrate_decr, .restart = pq2_restart, .progress 
= udbg_progress, }; diff --git a/arch/powerpc/platforms/82xx/m82xx_pci.h b/arch/powerpc/platforms/82xx/m82xx_pci.h deleted file mode 100644 index d07c4d7606f6..000000000000 --- a/arch/powerpc/platforms/82xx/m82xx_pci.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef _PPC_KERNEL_M82XX_PCI_H -#define _PPC_KERNEL_M82XX_PCI_H - -/* - */ - -#define SIU_INT_IRQ1 ((uint)0x13 + CPM_IRQ_OFFSET) - -#ifndef _IO_BASE -#define _IO_BASE isa_io_base -#endif - -#endif /* _PPC_KERNEL_M8260_PCI_H */ diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c deleted file mode 100644 index 0b5b9dec16d5..000000000000 --- a/arch/powerpc/platforms/82xx/mpc8272_ads.c +++ /dev/null @@ -1,213 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * MPC8272 ADS board support - * - * Copyright 2007 Freescale Semiconductor, Inc. - * Author: Scott Wood <scottwood@freescale.com> - * - * Based on code by Vitaly Bordug <vbordug@ru.mvista.com> - * Copyright (c) 2006 MontaVista Software, Inc. - */ - -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/fsl_devices.h> -#include <linux/of_address.h> -#include <linux/of_fdt.h> -#include <linux/of_platform.h> -#include <linux/io.h> - -#include <asm/cpm2.h> -#include <asm/udbg.h> -#include <asm/machdep.h> -#include <asm/time.h> - -#include <platforms/82xx/pq2.h> - -#include <sysdev/fsl_soc.h> -#include <sysdev/cpm2_pic.h> - -#include "pq2.h" - -static void __init mpc8272_ads_pic_init(void) -{ - struct device_node *np = of_find_compatible_node(NULL, NULL, - "fsl,cpm2-pic"); - if (!np) { - printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n"); - return; - } - - cpm2_pic_init(np); - of_node_put(np); - - /* Initialize stuff for the 82xx CPLD IC and install demux */ - pq2ads_pci_init_irq(); -} - -struct cpm_pin { - int port, pin, flags; -}; - -static struct cpm_pin mpc8272_ads_pins[] = { - /* SCC1 */ - {3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, - {3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - - /* SCC4 */ - {3, 21, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {3, 22, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - - /* FCC1 */ - {0, 14, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {0, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {0, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {0, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {0, 18, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {0, 19, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {0, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {0, 21, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {0, 26, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, - {0, 27, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, - {0, 28, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, - {0, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, - {0, 30, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, - {0, 31, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, - {2, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {2, 22, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - - /* FCC2 */ - {1, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 20, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 22, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 25, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 26, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, - {1, 30, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 31, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {2, 16, CPM_PIN_INPUT | 
CPM_PIN_PRIMARY}, - {2, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - - /* I2C */ - {3, 14, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, - {3, 15, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, - - /* USB */ - {2, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {2, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {2, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {2, 24, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {3, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {3, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {3, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, -}; - -static void __init init_ioports(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(mpc8272_ads_pins); i++) { - struct cpm_pin *pin = &mpc8272_ads_pins[i]; - cpm2_set_pin(pin->port, pin->pin, pin->flags); - } - - cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX); - cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK8, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK8, CPM_CLK_TX); - cpm2_clk_setup(CPM_CLK_SCC4, CPM_BRG4, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_SCC4, CPM_BRG4, CPM_CLK_TX); - cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK11, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_TX); - cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK15, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK16, CPM_CLK_TX); -} - -static void __init mpc8272_ads_setup_arch(void) -{ - struct device_node *np; - __be32 __iomem *bcsr; - - if (ppc_md.progress) - ppc_md.progress("mpc8272_ads_setup_arch()", 0); - - cpm2_reset(); - - np = of_find_compatible_node(NULL, NULL, "fsl,mpc8272ads-bcsr"); - if (!np) { - printk(KERN_ERR "No bcsr in device tree\n"); - return; - } - - bcsr = of_iomap(np, 0); - of_node_put(np); - if (!bcsr) { - printk(KERN_ERR "Cannot map BCSR registers\n"); - return; - } - -#define BCSR1_FETHIEN 0x08000000 -#define BCSR1_FETH_RST 0x04000000 -#define BCSR1_RS232_EN1 0x02000000 -#define BCSR1_RS232_EN2 0x01000000 -#define BCSR3_USB_nEN 0x80000000 -#define BCSR3_FETHIEN2 0x10000000 -#define BCSR3_FETH2_RST 0x08000000 - - clrbits32(&bcsr[1], BCSR1_RS232_EN1 | BCSR1_RS232_EN2 | BCSR1_FETHIEN); - setbits32(&bcsr[1], BCSR1_FETH_RST); - - clrbits32(&bcsr[3], BCSR3_FETHIEN2); - setbits32(&bcsr[3], BCSR3_FETH2_RST); - - clrbits32(&bcsr[3], BCSR3_USB_nEN); - - iounmap(bcsr); - - init_ioports(); - - if (ppc_md.progress) - ppc_md.progress("mpc8272_ads_setup_arch(), finish", 0); -} - -static const struct of_device_id of_bus_ids[] __initconst = { - { .name = "soc", }, - { .name = "cpm", }, - { .name = "localbus", }, - {}, -}; - -static int __init declare_of_platform_devices(void) -{ - /* Publish the QE devices */ - of_platform_bus_probe(NULL, of_bus_ids, NULL); - return 0; -} -machine_device_initcall(mpc8272_ads, declare_of_platform_devices); - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init mpc8272_ads_probe(void) -{ - return of_machine_is_compatible("fsl,mpc8272ads"); -} - -define_machine(mpc8272_ads) -{ - .name = "Freescale MPC8272 ADS", - .probe = mpc8272_ads_probe, - .setup_arch = mpc8272_ads_setup_arch, - .discover_phbs = pq2_init_pci, - .init_IRQ = mpc8272_ads_pic_init, - .get_irq = cpm2_get_irq, - .calibrate_decr = generic_calibrate_decr, - .restart = pq2_restart, - .progress = udbg_progress, -}; diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c index 3b5cb39a564c..391d72a2e09d 100644 --- a/arch/powerpc/platforms/82xx/pq2.c +++ b/arch/powerpc/platforms/82xx/pq2.c @@ -32,49 +32,3 @@ void __noreturn pq2_restart(char *cmd) panic("Restart failed\n"); } 
NOKPROBE_SYMBOL(pq2_restart) - -#ifdef CONFIG_PCI -static int pq2_pci_exclude_device(struct pci_controller *hose, - u_char bus, u8 devfn) -{ - if (bus == 0 && PCI_SLOT(devfn) == 0) - return PCIBIOS_DEVICE_NOT_FOUND; - else - return PCIBIOS_SUCCESSFUL; -} - -static void __init pq2_pci_add_bridge(struct device_node *np) -{ - struct pci_controller *hose; - struct resource r; - - if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b) - goto err; - - pci_add_flags(PCI_REASSIGN_ALL_BUS); - - hose = pcibios_alloc_controller(np); - if (!hose) - return; - - hose->dn = np; - - setup_indirect_pci(hose, r.start + 0x100, r.start + 0x104, 0); - pci_process_bridge_OF_ranges(hose, np, 1); - - return; - -err: - printk(KERN_ERR "No valid PCI reg property in device tree\n"); -} - -void __init pq2_init_pci(void) -{ - struct device_node *np; - - ppc_md.pci_exclude_device = pq2_pci_exclude_device; - - for_each_compatible_node(np, NULL, "fsl,pq2-pci") - pq2_pci_add_bridge(np); -} -#endif diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c deleted file mode 100644 index cf3210042a2e..000000000000 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ /dev/null @@ -1,172 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * PQ2 ADS-style PCI interrupt controller - * - * Copyright 2007 Freescale Semiconductor, Inc. - * Author: Scott Wood <scottwood@freescale.com> - * - * Loosely based on mpc82xx ADS support by Vitaly Bordug <vbordug@ru.mvista.com> - * Copyright (c) 2006 MontaVista Software, Inc. - */ - -#include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/irq.h> -#include <linux/types.h> -#include <linux/slab.h> -#include <linux/of_irq.h> - -#include <asm/io.h> -#include <asm/cpm2.h> - -#include "pq2.h" - -static DEFINE_RAW_SPINLOCK(pci_pic_lock); - -struct pq2ads_pci_pic { - struct device_node *node; - struct irq_domain *host; - - struct { - u32 stat; - u32 mask; - } __iomem *regs; -}; - -#define NUM_IRQS 32 - -static void pq2ads_pci_mask_irq(struct irq_data *d) -{ - struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d); - int irq = NUM_IRQS - irqd_to_hwirq(d) - 1; - - if (irq != -1) { - unsigned long flags; - raw_spin_lock_irqsave(&pci_pic_lock, flags); - - setbits32(&priv->regs->mask, 1 << irq); - mb(); - - raw_spin_unlock_irqrestore(&pci_pic_lock, flags); - } -} - -static void pq2ads_pci_unmask_irq(struct irq_data *d) -{ - struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d); - int irq = NUM_IRQS - irqd_to_hwirq(d) - 1; - - if (irq != -1) { - unsigned long flags; - - raw_spin_lock_irqsave(&pci_pic_lock, flags); - clrbits32(&priv->regs->mask, 1 << irq); - raw_spin_unlock_irqrestore(&pci_pic_lock, flags); - } -} - -static struct irq_chip pq2ads_pci_ic = { - .name = "PQ2 ADS PCI", - .irq_mask = pq2ads_pci_mask_irq, - .irq_mask_ack = pq2ads_pci_mask_irq, - .irq_ack = pq2ads_pci_mask_irq, - .irq_unmask = pq2ads_pci_unmask_irq, - .irq_enable = pq2ads_pci_unmask_irq, - .irq_disable = pq2ads_pci_mask_irq -}; - -static void pq2ads_pci_irq_demux(struct irq_desc *desc) -{ - struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc); - u32 stat, mask, pend; - int bit; - - for (;;) { - stat = in_be32(&priv->regs->stat); - mask = in_be32(&priv->regs->mask); - - pend = stat & ~mask; - - if (!pend) - break; - - for (bit = 0; pend != 0; ++bit, pend <<= 1) { - if (pend & 0x80000000) - generic_handle_domain_irq(priv->host, bit); - } - } -} - -static int pci_pic_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t 
hw) -{ - irq_set_status_flags(virq, IRQ_LEVEL); - irq_set_chip_data(virq, h->host_data); - irq_set_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); - return 0; -} - -static const struct irq_domain_ops pci_pic_host_ops = { - .map = pci_pic_host_map, -}; - -int __init pq2ads_pci_init_irq(void) -{ - struct pq2ads_pci_pic *priv; - struct irq_domain *host; - struct device_node *np; - int ret = -ENODEV; - int irq; - - np = of_find_compatible_node(NULL, NULL, "fsl,pq2ads-pci-pic"); - if (!np) { - printk(KERN_ERR "No pci pic node in device tree.\n"); - goto out; - } - - irq = irq_of_parse_and_map(np, 0); - if (!irq) { - printk(KERN_ERR "No interrupt in pci pic node.\n"); - goto out_put_node; - } - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - ret = -ENOMEM; - goto out_unmap_irq; - } - - /* PCI interrupt controller registers: status and mask */ - priv->regs = of_iomap(np, 0); - if (!priv->regs) { - printk(KERN_ERR "Cannot map PCI PIC registers.\n"); - goto out_free_kmalloc; - } - - /* mask all PCI interrupts */ - out_be32(&priv->regs->mask, ~0); - mb(); - - host = irq_domain_add_linear(np, NUM_IRQS, &pci_pic_host_ops, priv); - if (!host) { - ret = -ENOMEM; - goto out_unmap_regs; - } - - priv->host = host; - irq_set_handler_data(irq, priv); - irq_set_chained_handler(irq, pq2ads_pci_irq_demux); - ret = 0; - goto out_put_node; - -out_unmap_regs: - iounmap(priv->regs); -out_free_kmalloc: - kfree(priv); -out_unmap_irq: - irq_dispose_mapping(irq); -out_put_node: - of_node_put(np); -out: - return ret; -} diff --git a/arch/powerpc/platforms/82xx/pq2ads.h b/arch/powerpc/platforms/82xx/pq2ads.h deleted file mode 100644 index 9d0bf744945c..000000000000 --- a/arch/powerpc/platforms/82xx/pq2ads.h +++ /dev/null @@ -1,40 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * PQ2/mpc8260 board-specific stuff - * - * A collection of structures, addresses, and values associated with - * the Freescale MPC8260ADS/MPC8266ADS-PCI boards. - * Copied from the RPX-Classic and SBS8260 stuff. - * - * Author: Vitaly Bordug <vbordug@ru.mvista.com> - * - * Originally written by Dan Malek for Motorola MPC8260 family - * - * Copyright (c) 2001 Dan Malek <dan@embeddedalley.com> - * Copyright (c) 2006 MontaVista Software, Inc. - */ - -#ifdef __KERNEL__ -#ifndef __MACH_ADS8260_DEFS -#define __MACH_ADS8260_DEFS - -#include <linux/seq_file.h> - -/* The ADS8260 has 16, 32-bit wide control/status registers, accessed - * only on word boundaries. - * Not all are used (yet), or are interesting to us (yet). - */ - -/* Things of interest in the CSR. - */ -#define BCSR0_LED0 ((uint)0x02000000) /* 0 == on */ -#define BCSR0_LED1 ((uint)0x01000000) /* 0 == on */ -#define BCSR1_FETHIEN ((uint)0x08000000) /* 0 == enable*/ -#define BCSR1_FETH_RST ((uint)0x04000000) /* 0 == reset */ -#define BCSR1_RS232_EN1 ((uint)0x02000000) /* 0 ==enable */ -#define BCSR1_RS232_EN2 ((uint)0x01000000) /* 0 ==enable */ -#define BCSR3_FETHIEN2 ((uint)0x10000000) /* 0 == enable*/ -#define BCSR3_FETH2_RST ((uint)0x80000000) /* 0 == reset */ - -#endif /* __MACH_ADS8260_DEFS */ -#endif /* __KERNEL__ */ diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c deleted file mode 100644 index ac9113d524af..000000000000 --- a/arch/powerpc/platforms/82xx/pq2fads.c +++ /dev/null @@ -1,191 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * PQ2FADS board support - * - * Copyright 2007 Freescale Semiconductor, Inc. 
- * Author: Scott Wood <scottwood@freescale.com> - * - * Loosely based on mp82xx ADS support by Vitaly Bordug <vbordug@ru.mvista.com> - * Copyright (c) 2006 MontaVista Software, Inc. - */ - -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/fsl_devices.h> -#include <linux/of_address.h> -#include <linux/of_fdt.h> -#include <linux/of_platform.h> - -#include <asm/io.h> -#include <asm/cpm2.h> -#include <asm/udbg.h> -#include <asm/machdep.h> -#include <asm/time.h> - -#include <sysdev/fsl_soc.h> -#include <sysdev/cpm2_pic.h> - -#include "pq2ads.h" -#include "pq2.h" - -static void __init pq2fads_pic_init(void) -{ - struct device_node *np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic"); - if (!np) { - printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n"); - return; - } - - cpm2_pic_init(np); - of_node_put(np); - - /* Initialize stuff for the 82xx CPLD IC and install demux */ - pq2ads_pci_init_irq(); -} - -struct cpm_pin { - int port, pin, flags; -}; - -static struct cpm_pin pq2fads_pins[] = { - /* SCC1 */ - {3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, - {3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - - /* SCC2 */ - {3, 27, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {3, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - - /* FCC2 */ - {1, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 20, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 22, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 25, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 26, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, - {1, 30, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 31, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {2, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {2, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - - /* FCC3 */ - {1, 4, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 5, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 6, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 7, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 9, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 12, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 13, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 14, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 15, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {2, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {2, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, -}; - -static void __init init_ioports(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(pq2fads_pins); i++) { - struct cpm_pin *pin = &pq2fads_pins[i]; - cpm2_set_pin(pin->port, pin->pin, pin->flags); - } - - cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX); - cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_TX); - cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX); - cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK15, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK16, CPM_CLK_TX); -} - -static void __init pq2fads_setup_arch(void) -{ - struct device_node *np; - __be32 __iomem *bcsr; - - if (ppc_md.progress) - ppc_md.progress("pq2fads_setup_arch()", 0); - - cpm2_reset(); - - np = 
of_find_compatible_node(NULL, NULL, "fsl,pq2fads-bcsr"); - if (!np) { - printk(KERN_ERR "No fsl,pq2fads-bcsr in device tree\n"); - return; - } - - bcsr = of_iomap(np, 0); - of_node_put(np); - if (!bcsr) { - printk(KERN_ERR "Cannot map BCSR registers\n"); - return; - } - - /* Enable the serial and ethernet ports */ - - clrbits32(&bcsr[1], BCSR1_RS232_EN1 | BCSR1_RS232_EN2 | BCSR1_FETHIEN); - setbits32(&bcsr[1], BCSR1_FETH_RST); - - clrbits32(&bcsr[3], BCSR3_FETHIEN2); - setbits32(&bcsr[3], BCSR3_FETH2_RST); - - iounmap(bcsr); - - init_ioports(); - - /* Enable external IRQs */ - clrbits32(&cpm2_immr->im_siu_conf.siu_82xx.sc_siumcr, 0x0c000000); - - if (ppc_md.progress) - ppc_md.progress("pq2fads_setup_arch(), finish", 0); -} - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init pq2fads_probe(void) -{ - return of_machine_is_compatible("fsl,pq2fads"); -} - -static const struct of_device_id of_bus_ids[] __initconst = { - { .name = "soc", }, - { .name = "cpm", }, - { .name = "localbus", }, - {}, -}; - -static int __init declare_of_platform_devices(void) -{ - /* Publish the QE devices */ - of_platform_bus_probe(NULL, of_bus_ids, NULL); - return 0; -} -machine_device_initcall(pq2fads, declare_of_platform_devices); - -define_machine(pq2fads) -{ - .name = "Freescale PQ2FADS", - .probe = pq2fads_probe, - .setup_arch = pq2fads_setup_arch, - .discover_phbs = pq2_init_pci, - .init_IRQ = pq2fads_pic_init, - .get_irq = cpm2_get_irq, - .calibrate_decr = generic_calibrate_decr, - .restart = pq2_restart, - .progress = udbg_progress, -}; diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig index bee119725f61..d355ad40995f 100644 --- a/arch/powerpc/platforms/83xx/Kconfig +++ b/arch/powerpc/platforms/83xx/Kconfig @@ -25,13 +25,6 @@ config MPC831x_RDB help This option enables support for the MPC8313 RDB and MPC8315 RDB boards. -config MPC832x_MDS - bool "Freescale MPC832x MDS" - select DEFAULT_UIMAGE - select PPC_MPC832x - help - This option enables support for the MPC832x MDS evaluation board. - config MPC832x_RDB bool "Freescale MPC832x RDB" select DEFAULT_UIMAGE @@ -39,18 +32,6 @@ config MPC832x_RDB help This option enables support for the MPC8323 RDB board. -config MPC834x_MDS - bool "Freescale MPC834x MDS" - select DEFAULT_UIMAGE - select PPC_MPC834x - help - This option enables support for the MPC 834x MDS evaluation board. - - Be aware that PCI buses can only function when MDS board is plugged - into the PIB (Platform IO Board) board from Freescale which provide - 3 PCI slots. The PIBs PCI initialization is the bootloader's - responsibility. - config MPC834x_ITX bool "Freescale MPC834x ITX" select DEFAULT_UIMAGE @@ -61,12 +42,6 @@ config MPC834x_ITX Be aware that PCI initialization is the bootloader's responsibility. -config MPC836x_MDS - bool "Freescale MPC836x MDS" - select DEFAULT_UIMAGE - help - This option enables support for the MPC836x MDS Processor Board. - config MPC836x_RDK bool "Freescale/Logic MPC836x RDK" select DEFAULT_UIMAGE @@ -76,13 +51,6 @@ config MPC836x_RDK This option enables support for the MPC836x RDK Processor Board, also known as ZOOM PowerQUICC Kit. -config MPC837x_MDS - bool "Freescale MPC837x MDS" - select DEFAULT_UIMAGE - select PPC_MPC837x - help - This option enables support for the MPC837x MDS Processor Board. 
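
[Editor's note] Much of the board-file churn in this series follows one pattern: the hand-rolled early probe that matched the root node is dropped in favour of the .compatible (or .compatibles) field of the machine description, and the explicit .calibrate_decr = generic_calibrate_decr line goes away because that is the fallback. A minimal sketch with made-up board names (not from this patch):

    #include <linux/of.h>
    #include <asm/machdep.h>
    #include <asm/udbg.h>

    /* Old style, shown only for contrast; removed throughout this series: */
    static int __init example_probe(void)
    {
            return of_machine_is_compatible("vendor,example-board");
    }

    /* New style: the generic probe code matches on .compatible, or on a
     * NULL-terminated list supplied via .compatibles. */
    define_machine(example_board) {
            .name           = "Example Board",
            .compatible     = "vendor,example-board",
            .progress       = udbg_progress,
    };
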
- config MPC837x_RDB bool "Freescale MPC837x RDB/WLAN" select DEFAULT_UIMAGE diff --git a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile index 41cb5f842eff..6fc3dba943da 100644 --- a/arch/powerpc/platforms/83xx/Makefile +++ b/arch/powerpc/platforms/83xx/Makefile @@ -2,18 +2,17 @@ # # Makefile for the PowerPC 83xx linux kernel. # -obj-y := misc.o usb.o +obj-y := misc.o obj-$(CONFIG_SUSPEND) += suspend.o suspend-asm.o obj-$(CONFIG_MCU_MPC8349EMITX) += mcu_mpc8349emitx.o obj-$(CONFIG_MPC830x_RDB) += mpc830x_rdb.o obj-$(CONFIG_MPC831x_RDB) += mpc831x_rdb.o obj-$(CONFIG_MPC832x_RDB) += mpc832x_rdb.o -obj-$(CONFIG_MPC834x_MDS) += mpc834x_mds.o obj-$(CONFIG_MPC834x_ITX) += mpc834x_itx.o -obj-$(CONFIG_MPC836x_MDS) += mpc836x_mds.o obj-$(CONFIG_MPC836x_RDK) += mpc836x_rdk.o -obj-$(CONFIG_MPC832x_MDS) += mpc832x_mds.o -obj-$(CONFIG_MPC837x_MDS) += mpc837x_mds.o obj-$(CONFIG_MPC837x_RDB) += mpc837x_rdb.o obj-$(CONFIG_ASP834x) += asp834x.o obj-$(CONFIG_KMETER1) += km83xx.o +obj-$(CONFIG_PPC_MPC831x) += usb_831x.o +obj-$(CONFIG_PPC_MPC834x) += usb_834x.o +obj-$(CONFIG_PPC_MPC837x) += usb_837x.o diff --git a/arch/powerpc/platforms/83xx/asp834x.c b/arch/powerpc/platforms/83xx/asp834x.c index 68061c2a57c1..6870d0c34f1d 100644 --- a/arch/powerpc/platforms/83xx/asp834x.c +++ b/arch/powerpc/platforms/83xx/asp834x.c @@ -32,23 +32,14 @@ static void __init asp834x_setup_arch(void) machine_device_initcall(asp834x, mpc83xx_declare_of_platform_devices); -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init asp834x_probe(void) -{ - return of_machine_is_compatible("analogue-and-micro,asp8347e"); -} - define_machine(asp834x) { .name = "ASP8347E", - .probe = asp834x_probe, + .compatible = "analogue-and-micro,asp8347e", .setup_arch = asp834x_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c index 907acdecc94a..2b5d187d9b62 100644 --- a/arch/powerpc/platforms/83xx/km83xx.c +++ b/arch/powerpc/platforms/83xx/km83xx.c @@ -20,8 +20,8 @@ #include <linux/seq_file.h> #include <linux/root_dev.h> #include <linux/initrd.h> -#include <linux/of_platform.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/of_address.h> #include <linux/atomic.h> #include <linux/time.h> @@ -184,6 +184,5 @@ define_machine(mpc83xx_km) { .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index abb62fa630ef..6e37dfc6c5c9 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c @@ -92,10 +92,11 @@ static void mcu_power_off(void) mutex_unlock(&mcu->lock); } -static void mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +static int mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct mcu *mcu = gpiochip_get_data(gc); u8 bit = 1 << (4 + gpio); + int ret; mutex_lock(&mcu->lock); if (val) @@ -103,14 +104,16 @@ static void mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) else mcu->reg_ctrl |= bit; - i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL, mcu->reg_ctrl); + ret = 
i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL, + mcu->reg_ctrl); mutex_unlock(&mcu->lock); + + return ret; } static int mcu_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { - mcu_gpio_set(gc, gpio, val); - return 0; + return mcu_gpio_set(gc, gpio, val); } static int mcu_gpiochip_add(struct mcu *mcu) @@ -123,7 +126,7 @@ static int mcu_gpiochip_add(struct mcu *mcu) gc->can_sleep = 1; gc->ngpio = MCU_NUM_GPIO; gc->base = -1; - gc->set = mcu_gpio_set; + gc->set_rv = mcu_gpio_set; gc->direction_output = mcu_gpio_dir_out; gc->parent = dev; @@ -178,7 +181,7 @@ err: return ret; } -static int mcu_remove(struct i2c_client *client) +static void mcu_remove(struct i2c_client *client) { struct mcu *mcu = i2c_get_clientdata(client); @@ -193,7 +196,6 @@ static int mcu_remove(struct i2c_client *client) mcu_gpiochip_remove(mcu); kfree(mcu); - return 0; } static const struct i2c_device_id mcu_ids[] = { @@ -212,7 +214,7 @@ static struct i2c_driver mcu_driver = { .name = "mcu-mpc8349emitx", .of_match_table = mcu_of_match_table, }, - .probe_new = mcu_probe, + .probe = mcu_probe, .remove = mcu_remove, .id_table = mcu_ids, }; diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c index 3285dabcf923..1135c1ab923c 100644 --- a/arch/powerpc/platforms/83xx/misc.c +++ b/arch/powerpc/platforms/83xx/misc.c @@ -14,6 +14,8 @@ #include <asm/io.h> #include <asm/hw_irq.h> #include <asm/ipic.h> +#include <asm/fixmap.h> + #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> @@ -121,17 +123,15 @@ void __init mpc83xx_setup_pci(void) void __init mpc83xx_setup_arch(void) { + phys_addr_t immrbase = get_immrbase(); + int immrsize = IS_ALIGNED(immrbase, SZ_2M) ? SZ_2M : SZ_1M; + unsigned long va = fix_to_virt(FIX_IMMR_BASE); + if (ppc_md.progress) ppc_md.progress("mpc83xx_setup_arch()", 0); - if (!__map_without_bats) { - phys_addr_t immrbase = get_immrbase(); - int immrsize = IS_ALIGNED(immrbase, SZ_2M) ? 
SZ_2M : SZ_1M; - unsigned long va = fix_to_virt(FIX_IMMR_BASE); - - setbat(-1, va, immrbase, immrsize, PAGE_KERNEL_NCG); - update_bats(); - } + setbat(-1, va, immrbase, immrsize, PAGE_KERNEL_NCG); + update_bats(); } int machine_check_83xx(struct pt_regs *regs) diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c index 956d4389effa..63b6d213726a 100644 --- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c @@ -34,25 +34,16 @@ static const char *board[] __initdata = { NULL }; -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init mpc830x_rdb_probe(void) -{ - return of_device_compatible_match(of_root, board); -} - machine_device_initcall(mpc830x_rdb, mpc83xx_declare_of_platform_devices); define_machine(mpc830x_rdb) { .name = "MPC830x RDB", - .probe = mpc830x_rdb_probe, + .compatibles = board, .setup_arch = mpc830x_rdb_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c index 3b578f080e3b..5c39966762e4 100644 --- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c @@ -34,25 +34,16 @@ static const char *board[] __initdata = { NULL }; -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init mpc831x_rdb_probe(void) -{ - return of_device_compatible_match(of_root, board); -} - machine_device_initcall(mpc831x_rdb, mpc83xx_declare_of_platform_devices); define_machine(mpc831x_rdb) { .name = "MPC831x RDB", - .probe = mpc831x_rdb_probe, + .compatibles = board, .setup_arch = mpc831x_rdb_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c deleted file mode 100644 index 435344405d2c..000000000000 --- a/arch/powerpc/platforms/83xx/mpc832x_mds.c +++ /dev/null @@ -1,110 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved. - * - * Description: - * MPC832xE MDS board specific routines. - */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/reboot.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/major.h> -#include <linux/console.h> -#include <linux/delay.h> -#include <linux/seq_file.h> -#include <linux/root_dev.h> -#include <linux/initrd.h> -#include <linux/of_platform.h> -#include <linux/of_device.h> - -#include <linux/atomic.h> -#include <asm/time.h> -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/ipic.h> -#include <asm/irq.h> -#include <asm/udbg.h> -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> -#include <soc/fsl/qe/qe.h> - -#include "mpc83xx.h" - -#undef DEBUG -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) 
-#endif - -/* ************************************************************************ - * - * Setup the architecture - * - */ -static void __init mpc832x_sys_setup_arch(void) -{ - struct device_node *np; - u8 __iomem *bcsr_regs = NULL; - - mpc83xx_setup_arch(); - - /* Map BCSR area */ - np = of_find_node_by_name(NULL, "bcsr"); - if (np) { - struct resource res; - - of_address_to_resource(np, 0, &res); - bcsr_regs = ioremap(res.start, resource_size(&res)); - of_node_put(np); - } - -#ifdef CONFIG_QUICC_ENGINE - if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { - par_io_init(np); - of_node_put(np); - - for_each_node_by_name(np, "ucc") - par_io_of_config(np); - } - - if ((np = of_find_compatible_node(NULL, "network", "ucc_geth")) - != NULL){ - /* Reset the Ethernet PHYs */ -#define BCSR8_FETH_RST 0x50 - clrbits8(&bcsr_regs[8], BCSR8_FETH_RST); - udelay(1000); - setbits8(&bcsr_regs[8], BCSR8_FETH_RST); - iounmap(bcsr_regs); - of_node_put(np); - } -#endif /* CONFIG_QUICC_ENGINE */ -} - -machine_device_initcall(mpc832x_mds, mpc83xx_declare_of_platform_devices); - -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init mpc832x_sys_probe(void) -{ - return of_machine_is_compatible("MPC832xMDS"); -} - -define_machine(mpc832x_mds) { - .name = "MPC832x MDS", - .probe = mpc832x_sys_probe, - .setup_arch = mpc832x_sys_setup_arch, - .discover_phbs = mpc83xx_setup_pci, - .init_IRQ = mpc83xx_ipic_init_IRQ, - .get_irq = ipic_get_irq, - .restart = mpc83xx_restart, - .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -}; diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index bb8caa5071f8..d523ce0f48db 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -15,8 +15,10 @@ #include <linux/spi/spi.h> #include <linux/spi/mmc_spi.h> #include <linux/mmc/host.h> +#include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/fsl_devices.h> #include <asm/time.h> @@ -107,7 +109,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, goto next; unreg: - platform_device_del(pdev); + platform_device_put(pdev); err: pr_err("%pOF: registration failed\n", np); next: @@ -144,7 +146,7 @@ static int __init fsl_spi_init(struct spi_board_info *board_infos, static void mpc83xx_spi_cs_control(struct spi_device *spi, bool on) { - pr_debug("%s %d %d\n", __func__, spi->chip_select, on); + pr_debug("%s %d %d\n", __func__, spi_get_chipselect(spi, 0), on); par_io_data_set(3, 13, on); } @@ -162,6 +164,8 @@ static struct spi_board_info mpc832x_spi_boardinfo = { static int __init mpc832x_spi_init(void) { + struct device_node *np; + par_io_config_pin(3, 0, 3, 0, 1, 0); /* SPI1 MOSI, I/O */ par_io_config_pin(3, 1, 3, 0, 1, 0); /* SPI1 MISO, I/O */ par_io_config_pin(3, 2, 3, 0, 1, 0); /* SPI1 CLK, I/O */ @@ -175,7 +179,9 @@ static int __init mpc832x_spi_init(void) * Don't bother with legacy stuff when device tree contains * mmc-spi-slot node. 
*/ - if (of_find_compatible_node(NULL, NULL, "mmc-spi-slot")) + np = of_find_compatible_node(NULL, NULL, "mmc-spi-slot"); + of_node_put(np); + if (np) return 0; return fsl_spi_init(&mpc832x_spi_boardinfo, 1, mpc83xx_spi_cs_control); } @@ -208,23 +214,14 @@ static void __init mpc832x_rdb_setup_arch(void) machine_device_initcall(mpc832x_rdb, mpc83xx_declare_of_platform_devices); -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init mpc832x_rdb_probe(void) -{ - return of_machine_is_compatible("MPC832xRDB"); -} - define_machine(mpc832x_rdb) { .name = "MPC832x RDB", - .probe = mpc832x_rdb_probe, + .compatible = "MPC832xRDB", .setup_arch = mpc832x_rdb_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c index 6a110f275304..e45b98ff02d8 100644 --- a/arch/powerpc/platforms/83xx/mpc834x_itx.c +++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c @@ -57,23 +57,14 @@ static void __init mpc834x_itx_setup_arch(void) mpc834x_usb_cfg(); } -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init mpc834x_itx_probe(void) -{ - return of_machine_is_compatible("MPC834xMITX"); -} - define_machine(mpc834x_itx) { .name = "MPC834x ITX", - .probe = mpc834x_itx_probe, + .compatible = "MPC834xMITX", .setup_arch = mpc834x_itx_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c deleted file mode 100644 index 7dde5a75332b..000000000000 --- a/arch/powerpc/platforms/83xx/mpc834x_mds.c +++ /dev/null @@ -1,101 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * arch/powerpc/platforms/83xx/mpc834x_mds.c - * - * MPC834x MDS board specific routines - * - * Maintainer: Kumar Gala <galak@kernel.crashing.org> - */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/reboot.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/major.h> -#include <linux/console.h> -#include <linux/delay.h> -#include <linux/seq_file.h> -#include <linux/root_dev.h> -#include <linux/of_address.h> -#include <linux/of_platform.h> - -#include <linux/atomic.h> -#include <asm/time.h> -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/ipic.h> -#include <asm/irq.h> -#include <asm/udbg.h> -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> - -#include "mpc83xx.h" - -#define BCSR5_INT_USB 0x02 -static int __init mpc834xemds_usb_cfg(void) -{ - struct device_node *np; - void __iomem *bcsr_regs = NULL; - u8 bcsr5; - - mpc834x_usb_cfg(); - /* Map BCSR area */ - np = of_find_node_by_name(NULL, "bcsr"); - if (np) { - struct resource res; - - of_address_to_resource(np, 0, &res); - bcsr_regs = ioremap(res.start, resource_size(&res)); - of_node_put(np); - } - if (!bcsr_regs) - return -1; - - /* - * if Processor Board is plugged into PIB board, - * force to use the PHY on Processor Board - */ - bcsr5 = in_8(bcsr_regs + 5); - if (!(bcsr5 & BCSR5_INT_USB)) - out_8(bcsr_regs + 5, (bcsr5 | BCSR5_INT_USB)); - iounmap(bcsr_regs); 
- return 0; -} - -/* ************************************************************************ - * - * Setup the architecture - * - */ -static void __init mpc834x_mds_setup_arch(void) -{ - mpc83xx_setup_arch(); - - mpc834xemds_usb_cfg(); -} - -machine_device_initcall(mpc834x_mds, mpc83xx_declare_of_platform_devices); - -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init mpc834x_mds_probe(void) -{ - return of_machine_is_compatible("MPC834xMDS"); -} - -define_machine(mpc834x_mds) { - .name = "MPC834x MDS", - .probe = mpc834x_mds_probe, - .setup_arch = mpc834x_mds_setup_arch, - .discover_phbs = mpc83xx_setup_pci, - .init_IRQ = mpc83xx_ipic_init_IRQ, - .get_irq = ipic_get_irq, - .restart = mpc83xx_restart, - .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -}; diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c deleted file mode 100644 index b1e6665be5d3..000000000000 --- a/arch/powerpc/platforms/83xx/mpc836x_mds.c +++ /dev/null @@ -1,210 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved. - * - * Author: Li Yang <LeoLi@freescale.com> - * Yin Olivia <Hong-hua.Yin@freescale.com> - * - * Description: - * MPC8360E MDS board specific routines. - * - * Changelog: - * Jun 21, 2006 Initial version - */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/compiler.h> -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/reboot.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/major.h> -#include <linux/console.h> -#include <linux/delay.h> -#include <linux/seq_file.h> -#include <linux/root_dev.h> -#include <linux/initrd.h> -#include <linux/of_platform.h> -#include <linux/of_device.h> - -#include <linux/atomic.h> -#include <asm/time.h> -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/ipic.h> -#include <asm/irq.h> -#include <asm/udbg.h> -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> -#include <soc/fsl/qe/qe.h> - -#include "mpc83xx.h" - -#undef DEBUG -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) -#endif - -/* ************************************************************************ - * - * Setup the architecture - * - */ -static void __init mpc836x_mds_setup_arch(void) -{ - struct device_node *np; - u8 __iomem *bcsr_regs = NULL; - - mpc83xx_setup_arch(); - - /* Map BCSR area */ - np = of_find_node_by_name(NULL, "bcsr"); - if (np) { - struct resource res; - - of_address_to_resource(np, 0, &res); - bcsr_regs = ioremap(res.start, resource_size(&res)); - of_node_put(np); - } - -#ifdef CONFIG_QUICC_ENGINE - if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { - par_io_init(np); - of_node_put(np); - - for_each_node_by_name(np, "ucc") - par_io_of_config(np); -#ifdef CONFIG_QE_USB - /* Must fixup Par IO before QE GPIO chips are registered. 
*/ - par_io_config_pin(1, 2, 1, 0, 3, 0); /* USBOE */ - par_io_config_pin(1, 3, 1, 0, 3, 0); /* USBTP */ - par_io_config_pin(1, 8, 1, 0, 1, 0); /* USBTN */ - par_io_config_pin(1, 10, 2, 0, 3, 0); /* USBRXD */ - par_io_config_pin(1, 9, 2, 1, 3, 0); /* USBRP */ - par_io_config_pin(1, 11, 2, 1, 3, 0); /* USBRN */ - par_io_config_pin(2, 20, 2, 0, 1, 0); /* CLK21 */ -#endif /* CONFIG_QE_USB */ - } - - if ((np = of_find_compatible_node(NULL, "network", "ucc_geth")) - != NULL){ - uint svid; - - /* Reset the Ethernet PHY */ -#define BCSR9_GETHRST 0x20 - clrbits8(&bcsr_regs[9], BCSR9_GETHRST); - udelay(1000); - setbits8(&bcsr_regs[9], BCSR9_GETHRST); - - /* handle mpc8360ea rev.2.1 erratum 2: RGMII Timing */ - svid = mfspr(SPRN_SVR); - if (svid == 0x80480021) { - void __iomem *immap; - - immap = ioremap(get_immrbase() + 0x14a8, 8); - - /* - * IMMR + 0x14A8[4:5] = 11 (clk delay for UCC 2) - * IMMR + 0x14A8[18:19] = 11 (clk delay for UCC 1) - */ - setbits32(immap, 0x0c003000); - - /* - * IMMR + 0x14AC[20:27] = 10101010 - * (data delay for both UCC's) - */ - clrsetbits_be32(immap + 4, 0xff0, 0xaa0); - - iounmap(immap); - } - - iounmap(bcsr_regs); - of_node_put(np); - } -#endif /* CONFIG_QUICC_ENGINE */ -} - -machine_device_initcall(mpc836x_mds, mpc83xx_declare_of_platform_devices); - -#ifdef CONFIG_QE_USB -static int __init mpc836x_usb_cfg(void) -{ - u8 __iomem *bcsr; - struct device_node *np; - const char *mode; - int ret = 0; - - np = of_find_compatible_node(NULL, NULL, "fsl,mpc8360mds-bcsr"); - if (!np) - return -ENODEV; - - bcsr = of_iomap(np, 0); - of_node_put(np); - if (!bcsr) - return -ENOMEM; - - np = of_find_compatible_node(NULL, NULL, "fsl,mpc8323-qe-usb"); - if (!np) { - ret = -ENODEV; - goto err; - } - -#define BCSR8_TSEC1M_MASK (0x3 << 6) -#define BCSR8_TSEC1M_RGMII (0x0 << 6) -#define BCSR8_TSEC2M_MASK (0x3 << 4) -#define BCSR8_TSEC2M_RGMII (0x0 << 4) - /* - * Default is GMII (2), but we should set it to RGMII (0) if we use - * USB (Eth PHY is in RGMII mode anyway). 
- */ - clrsetbits_8(&bcsr[8], BCSR8_TSEC1M_MASK | BCSR8_TSEC2M_MASK, - BCSR8_TSEC1M_RGMII | BCSR8_TSEC2M_RGMII); - -#define BCSR13_USBMASK 0x0f -#define BCSR13_nUSBEN 0x08 /* 1 - Disable, 0 - Enable */ -#define BCSR13_USBSPEED 0x04 /* 1 - Full, 0 - Low */ -#define BCSR13_USBMODE 0x02 /* 1 - Host, 0 - Function */ -#define BCSR13_nUSBVCC 0x01 /* 1 - gets VBUS, 0 - supplies VBUS */ - - clrsetbits_8(&bcsr[13], BCSR13_USBMASK, BCSR13_USBSPEED); - - mode = of_get_property(np, "mode", NULL); - if (mode && !strcmp(mode, "peripheral")) { - setbits8(&bcsr[13], BCSR13_nUSBVCC); - qe_usb_clock_set(QE_CLK21, 48000000); - } else { - setbits8(&bcsr[13], BCSR13_USBMODE); - } - - of_node_put(np); -err: - iounmap(bcsr); - return ret; -} -machine_arch_initcall(mpc836x_mds, mpc836x_usb_cfg); -#endif /* CONFIG_QE_USB */ - -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init mpc836x_mds_probe(void) -{ - return of_machine_is_compatible("MPC836xMDS"); -} - -define_machine(mpc836x_mds) { - .name = "MPC836x MDS", - .probe = mpc836x_mds_probe, - .setup_arch = mpc836x_mds_setup_arch, - .discover_phbs = mpc83xx_setup_pci, - .init_IRQ = mpc83xx_ipic_init_IRQ, - .get_irq = ipic_get_irq, - .restart = mpc83xx_restart, - .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -}; diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c index 731bc5ce726d..1fc9d1235a7c 100644 --- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c +++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c @@ -28,23 +28,14 @@ static void __init mpc836x_rdk_setup_arch(void) mpc83xx_setup_arch(); } -/* - * Called very early, MMU is off, device-tree isn't unflattened. - */ -static int __init mpc836x_rdk_probe(void) -{ - return of_machine_is_compatible("fsl,mpc8360rdk"); -} - define_machine(mpc836x_rdk) { .name = "MPC836x RDK", - .probe = mpc836x_rdk_probe, + .compatible = "fsl,mpc8360rdk", .setup_arch = mpc836x_rdk_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c deleted file mode 100644 index fa3538803af7..000000000000 --- a/arch/powerpc/platforms/83xx/mpc837x_mds.c +++ /dev/null @@ -1,103 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * arch/powerpc/platforms/83xx/mpc837x_mds.c - * - * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 
- * - * MPC837x MDS board specific routines - */ - -#include <linux/pci.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_platform.h> - -#include <asm/time.h> -#include <asm/ipic.h> -#include <asm/udbg.h> -#include <sysdev/fsl_pci.h> - -#include "mpc83xx.h" - -#define BCSR12_USB_SER_MASK 0x8a -#define BCSR12_USB_SER_PIN 0x80 -#define BCSR12_USB_SER_DEVICE 0x02 - -static int __init mpc837xmds_usb_cfg(void) -{ - struct device_node *np; - const void *phy_type, *mode; - void __iomem *bcsr_regs = NULL; - u8 bcsr12; - int ret; - - ret = mpc837x_usb_cfg(); - if (ret) - return ret; - /* Map BCSR area */ - np = of_find_compatible_node(NULL, NULL, "fsl,mpc837xmds-bcsr"); - if (np) { - bcsr_regs = of_iomap(np, 0); - of_node_put(np); - } - if (!bcsr_regs) - return -1; - - np = of_find_node_by_name(NULL, "usb"); - if (!np) { - ret = -ENODEV; - goto out; - } - phy_type = of_get_property(np, "phy_type", NULL); - if (phy_type && !strcmp(phy_type, "ulpi")) { - clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN); - } else if (phy_type && !strcmp(phy_type, "serial")) { - mode = of_get_property(np, "dr_mode", NULL); - bcsr12 = in_8(bcsr_regs + 12) & ~BCSR12_USB_SER_MASK; - bcsr12 |= BCSR12_USB_SER_PIN; - if (mode && !strcmp(mode, "peripheral")) - bcsr12 |= BCSR12_USB_SER_DEVICE; - out_8(bcsr_regs + 12, bcsr12); - } else { - printk(KERN_ERR "USB DR: unsupported PHY\n"); - } - - of_node_put(np); -out: - iounmap(bcsr_regs); - return ret; -} - -/* ************************************************************************ - * - * Setup the architecture - * - */ -static void __init mpc837x_mds_setup_arch(void) -{ - mpc83xx_setup_arch(); - mpc837xmds_usb_cfg(); -} - -machine_device_initcall(mpc837x_mds, mpc83xx_declare_of_platform_devices); - -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init mpc837x_mds_probe(void) -{ - return of_machine_is_compatible("fsl,mpc837xmds"); -} - -define_machine(mpc837x_mds) { - .name = "MPC837x MDS", - .probe = mpc837x_mds_probe, - .setup_arch = mpc837x_mds_setup_arch, - .discover_phbs = mpc83xx_setup_pci, - .init_IRQ = mpc83xx_ipic_init_IRQ, - .get_irq = ipic_get_irq, - .restart = mpc83xx_restart, - .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -}; diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c index 5d48c6842098..45823e147933 100644 --- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c @@ -61,23 +61,14 @@ static const char * const board[] __initconst = { NULL }; -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init mpc837x_rdb_probe(void) -{ - return of_device_compatible_match(of_root, board); -} - define_machine(mpc837x_rdb) { .name = "MPC837x RDB/WLAN", - .probe = mpc837x_rdb_probe, + .compatibles = board, .setup_arch = mpc837x_rdb_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h index aea803ba3a15..0b8738a2b980 100644 --- a/arch/powerpc/platforms/83xx/mpc83xx.h +++ b/arch/powerpc/platforms/83xx/mpc83xx.h @@ -3,8 +3,6 @@ #define __MPC83XX_H__ #include <linux/init.h> -#include <linux/device.h> -#include <asm/pci-bridge.h> /* System Clock Control Register */ 
#define MPC83XX_SCCR_OFFS 0xA08 diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S index bc6bd4d0ae96..6a62ed6082c9 100644 --- a/arch/powerpc/platforms/83xx/suspend-asm.S +++ b/arch/powerpc/platforms/83xx/suspend-asm.S @@ -68,7 +68,8 @@ _GLOBAL(mpc83xx_enter_deep_sleep) mfspr r5, SPRN_HID0 mfspr r6, SPRN_HID1 - mfspr r7, SPRN_HID2 + /* FIXME: Should this use SPRN_HID2_G2_LE? */ + mfspr r7, SPRN_HID2_750FX stw r5, SS_HID+0(r3) stw r6, SS_HID+4(r3) @@ -396,7 +397,8 @@ mpc83xx_deep_resume: mtspr SPRN_HID0, r5 mtspr SPRN_HID1, r6 - mtspr SPRN_HID2, r7 + /* FIXME: Should this use SPRN_HID2_G2_LE? */ + mtspr SPRN_HID2_750FX, r7 lwz r4, SS_IABR+0(r3) lwz r5, SS_IABR+4(r3) diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 6d47a5b81485..99bd4355f28e 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -19,7 +19,7 @@ #include <linux/fsl_devices.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/export.h> #include <asm/reg.h> @@ -100,7 +100,6 @@ struct pmc_type { int has_deep_sleep; }; -static struct platform_device *pmc_dev; static int has_deep_sleep, deep_sleeping; static int pmc_irq; static struct mpc83xx_pmc __iomem *pmc_regs; @@ -207,7 +206,8 @@ static int mpc83xx_suspend_enter(suspend_state_t state) out_be32(&pmc_regs->config1, in_be32(&pmc_regs->config1) | PMCCR1_POWER_OFF); - enable_kernel_fp(); + if (IS_ENABLED(CONFIG_PPC_FPU)) + enable_kernel_fp(); mpc83xx_enter_deep_sleep(immrbase); @@ -262,9 +262,10 @@ static int mpc83xx_suspend_begin(suspend_state_t state) static int agent_thread_fn(void *data) { + set_freezable(); + while (1) { - wait_event_interruptible(agent_wq, pci_pm_state >= 2); - try_to_freeze(); + wait_event_freezable(agent_wq, pci_pm_state >= 2); if (signal_pending(current) || pci_pm_state < 2) continue; @@ -319,7 +320,27 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = { .end = mpc83xx_suspend_end, }; -static const struct of_device_id pmc_match[]; +static struct pmc_type pmc_types[] = { + { + .has_deep_sleep = 1, + }, + { + .has_deep_sleep = 0, + } +}; + +static const struct of_device_id pmc_match[] = { + { + .compatible = "fsl,mpc8313-pmc", + .data = &pmc_types[0], + }, + { + .compatible = "fsl,mpc8349-pmc", + .data = &pmc_types[1], + }, + {} +}; + static int pmc_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; @@ -336,7 +357,6 @@ static int pmc_probe(struct platform_device *ofdev) has_deep_sleep = type->has_deep_sleep; immrbase = get_immrbase(); - pmc_dev = ofdev; is_pci_agent = mpc83xx_is_pci_agent(); if (is_pci_agent < 0) @@ -401,39 +421,13 @@ out: return ret; } -static int pmc_remove(struct platform_device *ofdev) -{ - return -EPERM; -}; - -static struct pmc_type pmc_types[] = { - { - .has_deep_sleep = 1, - }, - { - .has_deep_sleep = 0, - } -}; - -static const struct of_device_id pmc_match[] = { - { - .compatible = "fsl,mpc8313-pmc", - .data = &pmc_types[0], - }, - { - .compatible = "fsl,mpc8349-pmc", - .data = &pmc_types[1], - }, - {} -}; - static struct platform_driver pmc_driver = { .driver = { .name = "mpc83xx-pmc", .of_match_table = pmc_match, + .suppress_bind_attrs = true, }, .probe = pmc_probe, - .remove = pmc_remove }; builtin_platform_driver(pmc_driver); diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c deleted file mode 100644 index 
e2a13a052f96..000000000000 --- a/arch/powerpc/platforms/83xx/usb.c +++ /dev/null @@ -1,251 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Freescale 83xx USB SOC setup code - * - * Copyright (C) 2007 Freescale Semiconductor, Inc. - * Author: Li Yang - */ - - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/of.h> -#include <linux/of_address.h> - -#include <asm/io.h> -#include <sysdev/fsl_soc.h> - -#include "mpc83xx.h" - - -#ifdef CONFIG_PPC_MPC834x -int __init mpc834x_usb_cfg(void) -{ - unsigned long sccr, sicrl, sicrh; - void __iomem *immap; - struct device_node *np = NULL; - int port0_is_dr = 0, port1_is_dr = 0; - const void *prop, *dr_mode; - - immap = ioremap(get_immrbase(), 0x1000); - if (!immap) - return -ENOMEM; - - /* Read registers */ - /* Note: DR and MPH must use the same clock setting in SCCR */ - sccr = in_be32(immap + MPC83XX_SCCR_OFFS) & ~MPC83XX_SCCR_USB_MASK; - sicrl = in_be32(immap + MPC83XX_SICRL_OFFS) & ~MPC834X_SICRL_USB_MASK; - sicrh = in_be32(immap + MPC83XX_SICRH_OFFS) & ~MPC834X_SICRH_USB_UTMI; - - np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr"); - if (np) { - sccr |= MPC83XX_SCCR_USB_DRCM_11; /* 1:3 */ - - prop = of_get_property(np, "phy_type", NULL); - port1_is_dr = 1; - if (prop && (!strcmp(prop, "utmi") || - !strcmp(prop, "utmi_wide"))) { - sicrl |= MPC834X_SICRL_USB0 | MPC834X_SICRL_USB1; - sicrh |= MPC834X_SICRH_USB_UTMI; - port0_is_dr = 1; - } else if (prop && !strcmp(prop, "serial")) { - dr_mode = of_get_property(np, "dr_mode", NULL); - if (dr_mode && !strcmp(dr_mode, "otg")) { - sicrl |= MPC834X_SICRL_USB0 | MPC834X_SICRL_USB1; - port0_is_dr = 1; - } else { - sicrl |= MPC834X_SICRL_USB1; - } - } else if (prop && !strcmp(prop, "ulpi")) { - sicrl |= MPC834X_SICRL_USB1; - } else { - printk(KERN_WARNING "834x USB PHY type not supported\n"); - } - of_node_put(np); - } - np = of_find_compatible_node(NULL, NULL, "fsl-usb2-mph"); - if (np) { - sccr |= MPC83XX_SCCR_USB_MPHCM_11; /* 1:3 */ - - prop = of_get_property(np, "port0", NULL); - if (prop) { - if (port0_is_dr) - printk(KERN_WARNING - "834x USB port0 can't be used by both DR and MPH!\n"); - sicrl &= ~MPC834X_SICRL_USB0; - } - prop = of_get_property(np, "port1", NULL); - if (prop) { - if (port1_is_dr) - printk(KERN_WARNING - "834x USB port1 can't be used by both DR and MPH!\n"); - sicrl &= ~MPC834X_SICRL_USB1; - } - of_node_put(np); - } - - /* Write back */ - out_be32(immap + MPC83XX_SCCR_OFFS, sccr); - out_be32(immap + MPC83XX_SICRL_OFFS, sicrl); - out_be32(immap + MPC83XX_SICRH_OFFS, sicrh); - - iounmap(immap); - return 0; -} -#endif /* CONFIG_PPC_MPC834x */ - -#ifdef CONFIG_PPC_MPC831x -int __init mpc831x_usb_cfg(void) -{ - u32 temp; - void __iomem *immap, *usb_regs; - struct device_node *np = NULL; - struct device_node *immr_node = NULL; - const void *prop; - struct resource res; - int ret = 0; -#ifdef CONFIG_USB_OTG - const void *dr_mode; -#endif - - np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr"); - if (!np) - return -ENODEV; - prop = of_get_property(np, "phy_type", NULL); - - /* Map IMMR space for pin and clock settings */ - immap = ioremap(get_immrbase(), 0x1000); - if (!immap) { - of_node_put(np); - return -ENOMEM; - } - - /* Configure clock */ - immr_node = of_get_parent(np); - if (immr_node && (of_device_is_compatible(immr_node, "fsl,mpc8315-immr") || - of_device_is_compatible(immr_node, "fsl,mpc8308-immr"))) - clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, - MPC8315_SCCR_USB_MASK, - MPC8315_SCCR_USB_DRCM_01); - else - 
clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, - MPC83XX_SCCR_USB_MASK, - MPC83XX_SCCR_USB_DRCM_11); - - /* Configure pin mux for ULPI. There is no pin mux for UTMI */ - if (prop && !strcmp(prop, "ulpi")) { - if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) { - clrsetbits_be32(immap + MPC83XX_SICRH_OFFS, - MPC8308_SICRH_USB_MASK, - MPC8308_SICRH_USB_ULPI); - } else if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) { - clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, - MPC8315_SICRL_USB_MASK, - MPC8315_SICRL_USB_ULPI); - clrsetbits_be32(immap + MPC83XX_SICRH_OFFS, - MPC8315_SICRH_USB_MASK, - MPC8315_SICRH_USB_ULPI); - } else { - clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, - MPC831X_SICRL_USB_MASK, - MPC831X_SICRL_USB_ULPI); - clrsetbits_be32(immap + MPC83XX_SICRH_OFFS, - MPC831X_SICRH_USB_MASK, - MPC831X_SICRH_USB_ULPI); - } - } - - iounmap(immap); - - of_node_put(immr_node); - - /* Map USB SOC space */ - ret = of_address_to_resource(np, 0, &res); - if (ret) { - of_node_put(np); - return ret; - } - usb_regs = ioremap(res.start, resource_size(&res)); - - /* Using on-chip PHY */ - if (prop && (!strcmp(prop, "utmi_wide") || - !strcmp(prop, "utmi"))) { - u32 refsel; - - if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) - goto out; - - if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) - refsel = CONTROL_REFSEL_24MHZ; - else - refsel = CONTROL_REFSEL_48MHZ; - /* Set UTMI_PHY_EN and REFSEL */ - out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, - CONTROL_UTMI_PHY_EN | refsel); - /* Using external UPLI PHY */ - } else if (prop && !strcmp(prop, "ulpi")) { - /* Set PHY_CLK_SEL to ULPI */ - temp = CONTROL_PHY_CLK_SEL_ULPI; -#ifdef CONFIG_USB_OTG - /* Set OTG_PORT */ - if (!of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) { - dr_mode = of_get_property(np, "dr_mode", NULL); - if (dr_mode && !strcmp(dr_mode, "otg")) - temp |= CONTROL_OTG_PORT; - } -#endif /* CONFIG_USB_OTG */ - out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, temp); - } else { - printk(KERN_WARNING "831x USB PHY type not supported\n"); - ret = -EINVAL; - } - -out: - iounmap(usb_regs); - of_node_put(np); - return ret; -} -#endif /* CONFIG_PPC_MPC831x */ - -#ifdef CONFIG_PPC_MPC837x -int __init mpc837x_usb_cfg(void) -{ - void __iomem *immap; - struct device_node *np = NULL; - const void *prop; - int ret = 0; - - np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr"); - if (!np || !of_device_is_available(np)) { - of_node_put(np); - return -ENODEV; - } - prop = of_get_property(np, "phy_type", NULL); - - if (!prop || (strcmp(prop, "ulpi") && strcmp(prop, "serial"))) { - printk(KERN_WARNING "837x USB PHY type not supported\n"); - of_node_put(np); - return -EINVAL; - } - - /* Map IMMR space for pin and clock settings */ - immap = ioremap(get_immrbase(), 0x1000); - if (!immap) { - of_node_put(np); - return -ENOMEM; - } - - /* Configure clock */ - clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, MPC837X_SCCR_USB_DRCM_11, - MPC837X_SCCR_USB_DRCM_11); - - /* Configure pin mux for ULPI/serial */ - clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, MPC837X_SICRL_USB_MASK, - MPC837X_SICRL_USB_ULPI); - - iounmap(immap); - of_node_put(np); - return ret; -} -#endif /* CONFIG_PPC_MPC837x */ diff --git a/arch/powerpc/platforms/83xx/usb_831x.c b/arch/powerpc/platforms/83xx/usb_831x.c new file mode 100644 index 000000000000..28c24e90f022 --- /dev/null +++ b/arch/powerpc/platforms/83xx/usb_831x.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Freescale 83xx USB SOC setup code + * + * Copyright (C) 2007 Freescale 
Semiconductor, Inc. + * Author: Li Yang + */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/io.h> + +#include <sysdev/fsl_soc.h> + +#include "mpc83xx.h" + +int __init mpc831x_usb_cfg(void) +{ + u32 temp; + void __iomem *immap, *usb_regs; + struct device_node *np = NULL; + struct device_node *immr_node = NULL; + const void *prop; + struct resource res; + int ret = 0; +#ifdef CONFIG_USB_OTG + const void *dr_mode; +#endif + + np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr"); + if (!np) + return -ENODEV; + prop = of_get_property(np, "phy_type", NULL); + + /* Map IMMR space for pin and clock settings */ + immap = ioremap(get_immrbase(), 0x1000); + if (!immap) { + of_node_put(np); + return -ENOMEM; + } + + /* Configure clock */ + immr_node = of_get_parent(np); + if (immr_node && (of_device_is_compatible(immr_node, "fsl,mpc8315-immr") || + of_device_is_compatible(immr_node, "fsl,mpc8308-immr"))) + clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, + MPC8315_SCCR_USB_MASK, + MPC8315_SCCR_USB_DRCM_01); + else + clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, + MPC83XX_SCCR_USB_MASK, + MPC83XX_SCCR_USB_DRCM_11); + + /* Configure pin mux for ULPI. There is no pin mux for UTMI */ + if (prop && !strcmp(prop, "ulpi")) { + if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) { + clrsetbits_be32(immap + MPC83XX_SICRH_OFFS, + MPC8308_SICRH_USB_MASK, + MPC8308_SICRH_USB_ULPI); + } else if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) { + clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, + MPC8315_SICRL_USB_MASK, + MPC8315_SICRL_USB_ULPI); + clrsetbits_be32(immap + MPC83XX_SICRH_OFFS, + MPC8315_SICRH_USB_MASK, + MPC8315_SICRH_USB_ULPI); + } else { + clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, + MPC831X_SICRL_USB_MASK, + MPC831X_SICRL_USB_ULPI); + clrsetbits_be32(immap + MPC83XX_SICRH_OFFS, + MPC831X_SICRH_USB_MASK, + MPC831X_SICRH_USB_ULPI); + } + } + + iounmap(immap); + + of_node_put(immr_node); + + /* Map USB SOC space */ + ret = of_address_to_resource(np, 0, &res); + if (ret) { + of_node_put(np); + return ret; + } + usb_regs = ioremap(res.start, resource_size(&res)); + + /* Using on-chip PHY */ + if (prop && (!strcmp(prop, "utmi_wide") || !strcmp(prop, "utmi"))) { + u32 refsel; + + if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) + goto out; + + if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) + refsel = CONTROL_REFSEL_24MHZ; + else + refsel = CONTROL_REFSEL_48MHZ; + /* Set UTMI_PHY_EN and REFSEL */ + out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, + CONTROL_UTMI_PHY_EN | refsel); + /* Using external UPLI PHY */ + } else if (prop && !strcmp(prop, "ulpi")) { + /* Set PHY_CLK_SEL to ULPI */ + temp = CONTROL_PHY_CLK_SEL_ULPI; +#ifdef CONFIG_USB_OTG + /* Set OTG_PORT */ + if (!of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) { + dr_mode = of_get_property(np, "dr_mode", NULL); + if (dr_mode && !strcmp(dr_mode, "otg")) + temp |= CONTROL_OTG_PORT; + } +#endif /* CONFIG_USB_OTG */ + out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, temp); + } else { + pr_warn("831x USB PHY type not supported\n"); + ret = -EINVAL; + } + +out: + iounmap(usb_regs); + of_node_put(np); + return ret; +} diff --git a/arch/powerpc/platforms/83xx/usb_834x.c b/arch/powerpc/platforms/83xx/usb_834x.c new file mode 100644 index 000000000000..3a8d6c662d06 --- /dev/null +++ b/arch/powerpc/platforms/83xx/usb_834x.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Freescale 83xx USB SOC setup code 
+ * + * Copyright (C) 2007 Freescale Semiconductor, Inc. + * Author: Li Yang + */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/io.h> + +#include <sysdev/fsl_soc.h> + +#include "mpc83xx.h" + +int __init mpc834x_usb_cfg(void) +{ + unsigned long sccr, sicrl, sicrh; + void __iomem *immap; + struct device_node *np = NULL; + int port0_is_dr = 0, port1_is_dr = 0; + const void *prop, *dr_mode; + + immap = ioremap(get_immrbase(), 0x1000); + if (!immap) + return -ENOMEM; + + /* Read registers */ + /* Note: DR and MPH must use the same clock setting in SCCR */ + sccr = in_be32(immap + MPC83XX_SCCR_OFFS) & ~MPC83XX_SCCR_USB_MASK; + sicrl = in_be32(immap + MPC83XX_SICRL_OFFS) & ~MPC834X_SICRL_USB_MASK; + sicrh = in_be32(immap + MPC83XX_SICRH_OFFS) & ~MPC834X_SICRH_USB_UTMI; + + np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr"); + if (np) { + sccr |= MPC83XX_SCCR_USB_DRCM_11; /* 1:3 */ + + prop = of_get_property(np, "phy_type", NULL); + port1_is_dr = 1; + if (prop && + (!strcmp(prop, "utmi") || !strcmp(prop, "utmi_wide"))) { + sicrl |= MPC834X_SICRL_USB0 | MPC834X_SICRL_USB1; + sicrh |= MPC834X_SICRH_USB_UTMI; + port0_is_dr = 1; + } else if (prop && !strcmp(prop, "serial")) { + dr_mode = of_get_property(np, "dr_mode", NULL); + if (dr_mode && !strcmp(dr_mode, "otg")) { + sicrl |= MPC834X_SICRL_USB0 | MPC834X_SICRL_USB1; + port0_is_dr = 1; + } else { + sicrl |= MPC834X_SICRL_USB1; + } + } else if (prop && !strcmp(prop, "ulpi")) { + sicrl |= MPC834X_SICRL_USB1; + } else { + pr_warn("834x USB PHY type not supported\n"); + } + of_node_put(np); + } + np = of_find_compatible_node(NULL, NULL, "fsl-usb2-mph"); + if (np) { + sccr |= MPC83XX_SCCR_USB_MPHCM_11; /* 1:3 */ + + prop = of_get_property(np, "port0", NULL); + if (prop) { + if (port0_is_dr) + pr_warn("834x USB port0 can't be used by both DR and MPH!\n"); + sicrl &= ~MPC834X_SICRL_USB0; + } + prop = of_get_property(np, "port1", NULL); + if (prop) { + if (port1_is_dr) + pr_warn("834x USB port1 can't be used by both DR and MPH!\n"); + sicrl &= ~MPC834X_SICRL_USB1; + } + of_node_put(np); + } + + /* Write back */ + out_be32(immap + MPC83XX_SCCR_OFFS, sccr); + out_be32(immap + MPC83XX_SICRL_OFFS, sicrl); + out_be32(immap + MPC83XX_SICRH_OFFS, sicrh); + + iounmap(immap); + return 0; +} diff --git a/arch/powerpc/platforms/83xx/usb_837x.c b/arch/powerpc/platforms/83xx/usb_837x.c new file mode 100644 index 000000000000..726935bb6e2d --- /dev/null +++ b/arch/powerpc/platforms/83xx/usb_837x.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Freescale 83xx USB SOC setup code + * + * Copyright (C) 2007 Freescale Semiconductor, Inc. 
+ * Author: Li Yang + */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/io.h> + +#include <sysdev/fsl_soc.h> + +#include "mpc83xx.h" + +int __init mpc837x_usb_cfg(void) +{ + void __iomem *immap; + struct device_node *np = NULL; + const void *prop; + int ret = 0; + + np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr"); + if (!np || !of_device_is_available(np)) { + of_node_put(np); + return -ENODEV; + } + prop = of_get_property(np, "phy_type", NULL); + + if (!prop || (strcmp(prop, "ulpi") && strcmp(prop, "serial"))) { + pr_warn("837x USB PHY type not supported\n"); + of_node_put(np); + return -EINVAL; + } + + /* Map IMMR space for pin and clock settings */ + immap = ioremap(get_immrbase(), 0x1000); + if (!immap) { + of_node_put(np); + return -ENOMEM; + } + + /* Configure clock */ + clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, MPC837X_SCCR_USB_DRCM_11, + MPC837X_SCCR_USB_DRCM_11); + + /* Configure pin mux for ULPI/serial */ + clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, MPC837X_SICRL_USB_MASK, + MPC837X_SICRL_USB_ULPI); + + iounmap(immap); + of_node_put(np); + return ret; +} diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index 2be17ffe8714..604c1b4b6d45 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 menuconfig FSL_SOC_BOOKE bool "Freescale Book-E Machine Type" - depends on PPC_85xx || PPC_BOOK3E + depends on PPC_E500 select FSL_SOC select PPC_UDBG_16550 select MPIC @@ -40,35 +40,14 @@ config BSC9132_QDS and dual StarCore SC3850 DSP cores. Manufacturer : Freescale Semiconductor, Inc -config MPC8540_ADS - bool "Freescale MPC8540 ADS" - select DEFAULT_UIMAGE - help - This option enables support for the MPC 8540 ADS board - -config MPC8560_ADS - bool "Freescale MPC8560 ADS" - select DEFAULT_UIMAGE - select CPM2 - help - This option enables support for the MPC 8560 ADS board - -config MPC85xx_CDS - bool "Freescale MPC85xx CDS" - select DEFAULT_UIMAGE - select PPC_I8259 - select HAVE_RAPIDIO - help - This option enables support for the MPC85xx CDS board - config MPC85xx_MDS - bool "Freescale MPC85xx MDS" + bool "Freescale MPC8568 MDS / MPC8569 MDS / P1021 MDS" select DEFAULT_UIMAGE select PHYLIB if NETDEVICES select HAVE_RAPIDIO select SWIOTLB help - This option enables support for the MPC85xx MDS board + This option enables support for the MPC8568 MDS, MPC8569 MDS and P1021 MDS boards config MPC8536_DS bool "Freescale MPC8536 DS" @@ -78,28 +57,43 @@ config MPC8536_DS This option enables support for the MPC8536 DS board config MPC85xx_DS - bool "Freescale MPC85xx DS" + bool "Freescale MPC8544 DS / MPC8572 DS" select PPC_I8259 select DEFAULT_UIMAGE select FSL_ULI1575 if PCI select SWIOTLB help - This option enables support for the MPC85xx DS (MPC8544 DS) board + This option enables support for the MPC8544 DS and MPC8572 DS boards config MPC85xx_RDB - bool "Freescale MPC85xx RDB" + bool "Freescale P102x MBG/UTM/RDB" select PPC_I8259 select DEFAULT_UIMAGE - select FSL_ULI1575 if PCI select SWIOTLB help - This option enables support for the MPC85xx RDB (P2020 RDB) board + This option enables support for the P1020 MBG PC, P1020 UTM PC, + P1020 RDB PC, P1020 RDB PD, P1020 RDB, P1021 RDB PC, P1024 RDB, + and P1025 RDB boards + +config PPC_P2020 + bool "Freescale P2020" + default y if MPC85xx_DS || MPC85xx_RDB + select DEFAULT_UIMAGE + select SWIOTLB + imply PPC_I8259 
+ imply FSL_ULI1575 if PCI + help + This option enables generic unified support for any board with the + Freescale P2020 processor. + + For example: P2020 DS board, P2020 RDB board, P2020 RDB PC board or + CZ.NIC Turris 1.x boards. config P1010_RDB - bool "Freescale P1010RDB" + bool "Freescale P1010 RDB" select DEFAULT_UIMAGE help - This option enables support for the MPC85xx RDB (P1010 RDB) board + This option enables support for the P1010 RDB board P1010RDB contains P1010Si, which provides CPU performance up to 800 MHz and 1600 DMIPS, additional functionality and faster interfaces @@ -239,8 +233,6 @@ endif # PPC32 config PPC_QEMU_E500 bool "QEMU generic e500 platform" select DEFAULT_UIMAGE - select E500 - select PPC_E500MC if PPC64 help This option enables support for running as a QEMU guest using QEMU's generic e500 machine. This is not required if you're @@ -256,7 +248,6 @@ config PPC_QEMU_E500 config CORENET_GENERIC bool "Freescale CoreNet Generic" select DEFAULT_UIMAGE - select E500 select PPC_E500MC select PHYS_64BIT select SWIOTLB diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile index 260fbad7967b..43c34f26f108 100644 --- a/arch/powerpc/platforms/85xx/Makefile +++ b/arch/powerpc/platforms/85xx/Makefile @@ -12,17 +12,16 @@ obj-y += common.o obj-$(CONFIG_BSC9131_RDB) += bsc913x_rdb.o obj-$(CONFIG_BSC9132_QDS) += bsc913x_qds.o obj-$(CONFIG_C293_PCIE) += c293pcie.o -obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o -obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o -obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o obj-$(CONFIG_MPC8536_DS) += mpc8536_ds.o -obj-$(CONFIG_MPC85xx_DS) += mpc85xx_ds.o +obj8259-$(CONFIG_PPC_I8259) += mpc85xx_8259.o +obj-$(CONFIG_MPC85xx_DS) += mpc85xx_ds.o $(obj8259-y) obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o obj-$(CONFIG_P1010_RDB) += p1010rdb.o obj-$(CONFIG_P1022_DS) += p1022_ds.o obj-$(CONFIG_P1022_RDK) += p1022_rdk.o obj-$(CONFIG_P1023_RDB) += p1023_rdb.o +obj-$(CONFIG_PPC_P2020) += p2020.o $(obj8259-y) obj-$(CONFIG_TWR_P102x) += twr_p102x.o obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o obj-$(CONFIG_FB_FSL_DIU) += t1042rdb_diu.o diff --git a/arch/powerpc/platforms/85xx/bsc913x_qds.c b/arch/powerpc/platforms/85xx/bsc913x_qds.c index bcbbeb5a972a..3ad8096fcf16 100644 --- a/arch/powerpc/platforms/85xx/bsc913x_qds.c +++ b/arch/powerpc/platforms/85xx/bsc913x_qds.c @@ -9,7 +9,7 @@ * Copyright 2014 Freescale Semiconductor Inc. 
*/ -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/pci.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> @@ -19,7 +19,7 @@ #include "mpc85xx.h" #include "smp.h" -void __init bsc913x_qds_pic_init(void) +static void __init bsc913x_qds_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, @@ -50,24 +50,14 @@ static void __init bsc913x_qds_setup_arch(void) machine_arch_initcall(bsc9132_qds, mpc85xx_common_publish_devices); -/* - * Called very early, device-tree isn't unflattened - */ - -static int __init bsc9132_qds_probe(void) -{ - return of_machine_is_compatible("fsl,bsc9132qds"); -} - define_machine(bsc9132_qds) { .name = "BSC9132 QDS", - .probe = bsc9132_qds_probe, + .compatible = "fsl,bsc9132qds", .setup_arch = bsc913x_qds_setup_arch, .init_IRQ = bsc913x_qds_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/bsc913x_rdb.c b/arch/powerpc/platforms/85xx/bsc913x_rdb.c index f78e5d3deedb..dcd358c28201 100644 --- a/arch/powerpc/platforms/85xx/bsc913x_rdb.c +++ b/arch/powerpc/platforms/85xx/bsc913x_rdb.c @@ -7,7 +7,7 @@ * Copyright 2011-2012 Freescale Semiconductor Inc. */ -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/pci.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> @@ -15,7 +15,7 @@ #include "mpc85xx.h" -void __init bsc913x_rdb_pic_init(void) +static void __init bsc913x_rdb_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, @@ -40,21 +40,11 @@ static void __init bsc913x_rdb_setup_arch(void) machine_device_initcall(bsc9131_rdb, mpc85xx_common_publish_devices); -/* - * Called very early, device-tree isn't unflattened - */ - -static int __init bsc9131_rdb_probe(void) -{ - return of_machine_is_compatible("fsl,bsc9131rdb"); -} - define_machine(bsc9131_rdb) { .name = "BSC9131 RDB", - .probe = bsc9131_rdb_probe, + .compatible = "fsl,bsc9131rdb", .setup_arch = bsc913x_rdb_setup_arch, .init_IRQ = bsc913x_rdb_pic_init, .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c index 58a398c89e97..7a63a3ad5e8a 100644 --- a/arch/powerpc/platforms/85xx/c293pcie.c +++ b/arch/powerpc/platforms/85xx/c293pcie.c @@ -7,8 +7,7 @@ #include <linux/stddef.h> #include <linux/kernel.h> -#include <linux/of_fdt.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <asm/machdep.h> #include <asm/udbg.h> @@ -45,22 +44,11 @@ static void __init c293_pcie_setup_arch(void) machine_arch_initcall(c293_pcie, mpc85xx_common_publish_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init c293_pcie_probe(void) -{ - if (of_machine_is_compatible("fsl,C293PCIE")) - return 1; - return 0; -} - define_machine(c293_pcie) { .name = "C293 PCIE", - .probe = c293_pcie_probe, + .compatible = "fsl,C293PCIE", .setup_arch = c293_pcie_setup_arch, .init_IRQ = c293_pcie_pic_init, .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c index a554b6d87cf7..757811155587 100644 --- a/arch/powerpc/platforms/85xx/common.c +++ b/arch/powerpc/platforms/85xx/common.c @@ -3,6 +3,7 @@ * Routines common to most mpc85xx-based boards. 
*/ +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_platform.h> diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index 28d6b36f1ccd..c44400e95f55 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ b/arch/powerpc/platforms/85xx/corenet_generic.c @@ -30,7 +30,7 @@ #include "smp.h" #include "mpc85xx.h" -void __init corenet_gen_pic_init(void) +static void __init corenet_gen_pic_init(void) { struct mpic *mpic; unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU | @@ -48,7 +48,7 @@ void __init corenet_gen_pic_init(void) /* * Setup the architecture */ -void __init corenet_gen_setup_arch(void) +static void __init corenet_gen_setup_arch(void) { mpc85xx_smp_init(); @@ -101,7 +101,7 @@ static const struct of_device_id of_device_ids[] = { {} }; -int __init corenet_gen_publish_devices(void) +static int __init corenet_gen_publish_devices(void) { return of_platform_bus_probe(NULL, of_device_ids, NULL); } @@ -149,7 +149,7 @@ static int __init corenet_generic_probe(void) extern struct smp_ops_t smp_85xx_ops; #endif - if (of_device_compatible_match(of_root, boards)) + if (of_machine_compatible_match(boards)) return 1; /* Check if we're running under the Freescale hypervisor */ @@ -198,11 +198,6 @@ define_machine(corenet_generic) { #else .get_irq = mpic_get_coreint_irq, #endif - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, -#ifdef CONFIG_PPC64 - .power_save = book3e_idle, -#else .power_save = e500_idle, -#endif }; diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c index 8e827376d97b..477852f1a726 100644 --- a/arch/powerpc/platforms/85xx/ge_imp3a.c +++ b/arch/powerpc/platforms/85xx/ge_imp3a.c @@ -17,8 +17,8 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> @@ -38,7 +38,7 @@ void __iomem *imp3a_regs; -void __init ge_imp3a_pic_init(void) +static void __init ge_imp3a_pic_init(void) { struct mpic *mpic; struct device_node *np; @@ -89,8 +89,10 @@ static void __init ge_imp3a_pci_assign_primary(void) of_device_is_compatible(np, "fsl,mpc8548-pcie") || of_device_is_compatible(np, "fsl,p2020-pcie")) { of_address_to_resource(np, 0, &rsrc); - if ((rsrc.start & 0xfffff) == 0x9000) - fsl_pci_primary = np; + if ((rsrc.start & 0xfffff) == 0x9000) { + of_node_put(fsl_pci_primary); + fsl_pci_primary = of_node_get(np); + } } } #endif @@ -188,19 +190,11 @@ static void ge_imp3a_show_cpuinfo(struct seq_file *m) ge_imp3a_get_cpci_is_syscon() ? 
"yes" : "no"); } -/* - * Called very early, device-tree isn't unflattened - */ -static int __init ge_imp3a_probe(void) -{ - return of_machine_is_compatible("ge,IMP3A"); -} - machine_arch_initcall(ge_imp3a, mpc85xx_common_publish_devices); define_machine(ge_imp3a) { .name = "GE_IMP3A", - .probe = ge_imp3a_probe, + .compatible = "ge,IMP3A", .setup_arch = ge_imp3a_setup_arch, .init_IRQ = ge_imp3a_pic_init, .show_cpuinfo = ge_imp3a_show_cpuinfo, @@ -209,6 +203,5 @@ define_machine(ge_imp3a) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c index bdf9d42f8521..1b6326a4b0f2 100644 --- a/arch/powerpc/platforms/85xx/ksi8560.c +++ b/arch/powerpc/platforms/85xx/ksi8560.c @@ -18,7 +18,8 @@ #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> -#include <linux/of_platform.h> +#include <linux/of.h> +#include <linux/of_address.h> #include <asm/time.h> #include <asm/machdep.h> @@ -133,6 +134,8 @@ static void __init ksi8560_setup_arch(void) else printk(KERN_ERR "Can't find CPLD in device tree\n"); + of_node_put(cpld); + if (ppc_md.progress) ppc_md.progress("ksi8560_setup_arch()", 0); @@ -170,21 +173,12 @@ static void ksi8560_show_cpuinfo(struct seq_file *m) machine_device_initcall(ksi8560, mpc85xx_common_publish_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init ksi8560_probe(void) -{ - return of_machine_is_compatible("emerson,KSI8560"); -} - define_machine(ksi8560) { .name = "KSI8560", - .probe = ksi8560_probe, + .compatible = "emerson,KSI8560", .setup_arch = ksi8560_setup_arch, .init_IRQ = ksi8560_pic_init, .show_cpuinfo = ksi8560_show_cpuinfo, .get_irq = mpic_get_irq, .restart = machine_restart, - .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c index e5d7386ad612..b3327a358eb4 100644 --- a/arch/powerpc/platforms/85xx/mpc8536_ds.c +++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c @@ -12,7 +12,7 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <asm/time.h> #include <asm/machdep.h> @@ -27,7 +27,7 @@ #include "mpc85xx.h" -void __init mpc8536_ds_pic_init(void) +static void __init mpc8536_ds_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); @@ -52,17 +52,9 @@ static void __init mpc8536_ds_setup_arch(void) machine_arch_initcall(mpc8536_ds, mpc85xx_common_publish_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init mpc8536_ds_probe(void) -{ - return of_machine_is_compatible("fsl,mpc8536ds"); -} - define_machine(mpc8536_ds) { .name = "MPC8536 DS", - .probe = mpc8536_ds_probe, + .compatible = "fsl,mpc8536ds", .setup_arch = mpc8536_ds_setup_arch, .init_IRQ = mpc8536_ds_pic_init, #ifdef CONFIG_PCI @@ -70,6 +62,5 @@ define_machine(mpc8536_ds) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/mpc85xx.h b/arch/powerpc/platforms/85xx/mpc85xx.h index cb84c5c56c36..c764d7551ef1 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx.h +++ b/arch/powerpc/platforms/85xx/mpc85xx.h @@ -15,4 +15,10 @@ extern void mpc85xx_qe_par_io_init(void); static inline void __init 
mpc85xx_qe_par_io_init(void) {} #endif +#ifdef CONFIG_PPC_I8259 +void __init mpc85xx_8259_init(void); +#else +static inline void __init mpc85xx_8259_init(void) {} +#endif + #endif diff --git a/arch/powerpc/platforms/85xx/mpc85xx_8259.c b/arch/powerpc/platforms/85xx/mpc85xx_8259.c new file mode 100644 index 000000000000..cb00d596ad80 --- /dev/null +++ b/arch/powerpc/platforms/85xx/mpc85xx_8259.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * MPC85xx 8259 functions for DS Board Setup + * + * Author Xianghua Xiao (x.xiao@freescale.com) + * Roy Zang <tie-fei.zang@freescale.com> + * - Add PCI/PCI Express support + * Copyright 2007 Freescale Semiconductor Inc. + */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> + +#include <asm/mpic.h> +#include <asm/i8259.h> + +#include "mpc85xx.h" + +static void mpc85xx_8259_cascade(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int cascade_irq = i8259_irq(); + + if (cascade_irq) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} + +void __init mpc85xx_8259_init(void) +{ + struct device_node *np; + struct device_node *cascade_node = NULL; + int cascade_irq; + + /* Initialize the i8259 controller */ + for_each_node_by_type(np, "interrupt-controller") { + if (of_device_is_compatible(np, "chrp,iic")) { + cascade_node = np; + break; + } + } + + if (cascade_node == NULL) { + pr_debug("i8259: Could not find i8259 PIC\n"); + return; + } + + cascade_irq = irq_of_parse_and_map(cascade_node, 0); + if (!cascade_irq) { + pr_err("i8259: Failed to map cascade interrupt\n"); + return; + } + + pr_debug("i8259: cascade mapped to irq %d\n", cascade_irq); + + i8259_init(cascade_node, 0); + of_node_put(cascade_node); + + irq_set_chained_handler(cascade_irq, mpc85xx_8259_cascade); +} diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c deleted file mode 100644 index a34fc037957d..000000000000 --- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c +++ /dev/null @@ -1,171 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * MPC85xx setup and early boot code plus other random bits. - * - * Maintained by Kumar Gala (see MAINTAINERS for contact information) - * - * Copyright 2005 Freescale Semiconductor Inc. 
- */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/delay.h> -#include <linux/seq_file.h> -#include <linux/of_platform.h> - -#include <asm/time.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <asm/mpic.h> -#include <mm/mmu_decl.h> -#include <asm/udbg.h> - -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> - -#ifdef CONFIG_CPM2 -#include <asm/cpm2.h> -#include <sysdev/cpm2_pic.h> -#endif - -#include "mpc85xx.h" - -static void __init mpc85xx_ads_pic_init(void) -{ - struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, - 0, 256, " OpenPIC "); - BUG_ON(mpic == NULL); - mpic_init(mpic); - - mpc85xx_cpm2_pic_init(); -} - -/* - * Setup the architecture - */ -#ifdef CONFIG_CPM2 -struct cpm_pin { - int port, pin, flags; -}; - -static const struct cpm_pin mpc8560_ads_pins[] = { - /* SCC1 */ - {3, 29, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, - {3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - - /* SCC2 */ - {2, 12, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {2, 13, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {3, 26, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {3, 27, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {3, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - - /* FCC2 */ - {1, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 20, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 22, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 25, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 26, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, - {1, 30, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 31, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {2, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK14 */ - {2, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK13 */ - - /* FCC3 */ - {1, 4, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 5, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 6, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 9, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 12, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 13, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 14, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 15, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, - {1, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {1, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, - {2, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK16 */ - {2, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK15 */ - {2, 27, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, -}; - -static void __init init_ioports(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(mpc8560_ads_pins); i++) { - const struct cpm_pin *pin = &mpc8560_ads_pins[i]; - cpm2_set_pin(pin->port, pin->pin, pin->flags); - } - - cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX); - cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_TX); - cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX); - cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK15, CPM_CLK_RX); - cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK16, CPM_CLK_TX); -} -#endif - -static void __init mpc85xx_ads_setup_arch(void) -{ - if (ppc_md.progress) - 
ppc_md.progress("mpc85xx_ads_setup_arch()", 0); - -#ifdef CONFIG_CPM2 - cpm2_reset(); - init_ioports(); -#endif - - fsl_pci_assign_primary(); -} - -static void mpc85xx_ads_show_cpuinfo(struct seq_file *m) -{ - uint pvid, svid, phid1; - - pvid = mfspr(SPRN_PVR); - svid = mfspr(SPRN_SVR); - - seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n"); - seq_printf(m, "PVR\t\t: 0x%x\n", pvid); - seq_printf(m, "SVR\t\t: 0x%x\n", svid); - - /* Display cpu Pll setting */ - phid1 = mfspr(SPRN_HID1); - seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); -} - -machine_arch_initcall(mpc85xx_ads, mpc85xx_common_publish_devices); - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init mpc85xx_ads_probe(void) -{ - return of_machine_is_compatible("MPC85xxADS"); -} - -define_machine(mpc85xx_ads) { - .name = "MPC85xx ADS", - .probe = mpc85xx_ads_probe, - .setup_arch = mpc85xx_ads_setup_arch, - .init_IRQ = mpc85xx_ads_pic_init, - .show_cpuinfo = mpc85xx_ads_show_cpuinfo, - .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -}; diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c deleted file mode 100644 index 48f3acfece0b..000000000000 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ /dev/null @@ -1,396 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * MPC85xx setup and early boot code plus other random bits. - * - * Maintained by Kumar Gala (see MAINTAINERS for contact information) - * - * Copyright 2005, 2011-2012 Freescale Semiconductor Inc. - */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/reboot.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/major.h> -#include <linux/console.h> -#include <linux/delay.h> -#include <linux/seq_file.h> -#include <linux/initrd.h> -#include <linux/interrupt.h> -#include <linux/fsl_devices.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/of_platform.h> -#include <linux/pgtable.h> - -#include <asm/page.h> -#include <linux/atomic.h> -#include <asm/time.h> -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/ipic.h> -#include <asm/pci-bridge.h> -#include <asm/irq.h> -#include <mm/mmu_decl.h> -#include <asm/udbg.h> -#include <asm/mpic.h> -#include <asm/i8259.h> - -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> - -#include "mpc85xx.h" - -/* - * The CDS board contains an FPGA/CPLD called "Cadmus", which collects - * various logic and performs system control functions. - * Here is the FPGA/CPLD register map. 
- */ -struct cadmus_reg { - u8 cm_ver; /* Board version */ - u8 cm_csr; /* General control/status */ - u8 cm_rst; /* Reset control */ - u8 cm_hsclk; /* High speed clock */ - u8 cm_hsxclk; /* High speed clock extended */ - u8 cm_led; /* LED data */ - u8 cm_pci; /* PCI control/status */ - u8 cm_dma; /* DMA control */ - u8 res[248]; /* Total 256 bytes */ -}; - -static struct cadmus_reg *cadmus; - -#ifdef CONFIG_PCI - -#define ARCADIA_HOST_BRIDGE_IDSEL 17 -#define ARCADIA_2ND_BRIDGE_IDSEL 3 - -static int mpc85xx_exclude_device(struct pci_controller *hose, - u_char bus, u_char devfn) -{ - /* We explicitly do not go past the Tundra 320 Bridge */ - if ((bus == 1) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL)) - return PCIBIOS_DEVICE_NOT_FOUND; - if ((bus == 0) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL)) - return PCIBIOS_DEVICE_NOT_FOUND; - else - return PCIBIOS_SUCCESSFUL; -} - -static int mpc85xx_cds_restart(struct notifier_block *this, - unsigned long mode, void *cmd) -{ - struct pci_dev *dev; - u_char tmp; - - if ((dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, - NULL))) { - - /* Use the VIA Super Southbridge to force a PCI reset */ - pci_read_config_byte(dev, 0x47, &tmp); - pci_write_config_byte(dev, 0x47, tmp | 1); - - /* Flush the outbound PCI write queues */ - pci_read_config_byte(dev, 0x47, &tmp); - - /* - * At this point, the hardware reset should have triggered. - * However, if it doesn't work for some mysterious reason, - * just fall through to the default reset below. - */ - - pci_dev_put(dev); - } - - /* - * If we can't find the VIA chip (maybe the P2P bridge is - * disabled) or the VIA chip reset didn't work, just return - * and let default reset sequence happen. - */ - return NOTIFY_DONE; -} - -static int mpc85xx_cds_restart_register(void) -{ - static struct notifier_block restart_handler; - - restart_handler.notifier_call = mpc85xx_cds_restart; - restart_handler.priority = 192; - - return register_restart_handler(&restart_handler); -} -machine_arch_initcall(mpc85xx_cds, mpc85xx_cds_restart_register); - - -static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev) -{ - u_char c; - if (dev->vendor == PCI_VENDOR_ID_VIA) { - switch (dev->device) { - case PCI_DEVICE_ID_VIA_82C586_1: - /* - * U-Boot does not set the enable bits - * for the IDE device. Force them on here. - */ - pci_read_config_byte(dev, 0x40, &c); - c |= 0x03; /* IDE: Chip Enable Bits */ - pci_write_config_byte(dev, 0x40, c); - - /* - * Since only primary interface works, force the - * IDE function to standard primary IDE interrupt - * w/ 8259 offset - */ - dev->irq = 14; - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); - break; - /* - * Force legacy USB interrupt routing - */ - case PCI_DEVICE_ID_VIA_82C586_2: - /* There are two USB controllers. - * Identify them by function number - */ - if (PCI_FUNC(dev->devfn) == 3) - dev->irq = 11; - else - dev->irq = 10; - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); - default: - break; - } - } -} - -static void skip_fake_bridge(struct pci_dev *dev) -{ - /* Make it an error to skip the fake bridge - * in pci_setup_device() in probe.c */ - dev->hdr_type = 0x7f; -} -DECLARE_PCI_FIXUP_EARLY(0x1957, 0x3fff, skip_fake_bridge); -DECLARE_PCI_FIXUP_EARLY(0x3fff, 0x1957, skip_fake_bridge); -DECLARE_PCI_FIXUP_EARLY(0xff3f, 0x5719, skip_fake_bridge); - -#define PCI_DEVICE_ID_IDT_TSI310 0x01a7 - -/* - * Fix Tsi310 PCI-X bridge resource. - * Force the bridge to open a window from 0x0000-0x1fff in PCI I/O space. 
- * This allows legacy I/O(i8259, etc) on the VIA southbridge to be accessed. - */ -void mpc85xx_cds_fixup_bus(struct pci_bus *bus) -{ - struct pci_dev *dev = bus->self; - struct resource *res = bus->resource[0]; - - if (dev != NULL && - dev->vendor == PCI_VENDOR_ID_IBM && - dev->device == PCI_DEVICE_ID_IDT_TSI310) { - if (res) { - res->start = 0; - res->end = 0x1fff; - res->flags = IORESOURCE_IO; - pr_info("mpc85xx_cds: PCI bridge resource fixup applied\n"); - pr_info("mpc85xx_cds: %pR\n", res); - } - } - - fsl_pcibios_fixup_bus(bus); -} - -#ifdef CONFIG_PPC_I8259 -static void mpc85xx_8259_cascade_handler(struct irq_desc *desc) -{ - unsigned int cascade_irq = i8259_irq(); - - if (cascade_irq) - /* handle an interrupt from the 8259 */ - generic_handle_irq(cascade_irq); - - /* check for any interrupts from the shared IRQ line */ - handle_fasteoi_irq(desc); -} - -static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id) -{ - return IRQ_HANDLED; -} -#endif /* PPC_I8259 */ -#endif /* CONFIG_PCI */ - -static void __init mpc85xx_cds_pic_init(void) -{ - struct mpic *mpic; - mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, - 0, 256, " OpenPIC "); - BUG_ON(mpic == NULL); - mpic_init(mpic); -} - -#if defined(CONFIG_PPC_I8259) && defined(CONFIG_PCI) -static int mpc85xx_cds_8259_attach(void) -{ - int ret; - struct device_node *np = NULL; - struct device_node *cascade_node = NULL; - int cascade_irq; - - /* Initialize the i8259 controller */ - for_each_node_by_type(np, "interrupt-controller") - if (of_device_is_compatible(np, "chrp,iic")) { - cascade_node = np; - break; - } - - if (cascade_node == NULL) { - printk(KERN_DEBUG "Could not find i8259 PIC\n"); - return -ENODEV; - } - - cascade_irq = irq_of_parse_and_map(cascade_node, 0); - if (!cascade_irq) { - printk(KERN_ERR "Failed to map cascade interrupt\n"); - return -ENXIO; - } - - i8259_init(cascade_node, 0); - of_node_put(cascade_node); - - /* - * Hook the interrupt to make sure desc->action is never NULL. - * This is required to ensure that the interrupt does not get - * disabled when the last user of the shared IRQ line frees their - * interrupt. - */ - ret = request_irq(cascade_irq, mpc85xx_8259_cascade_action, - IRQF_SHARED | IRQF_NO_THREAD, "8259 cascade", - cascade_node); - if (ret) { - printk(KERN_ERR "Failed to setup cascade interrupt\n"); - return ret; - } - - /* Success. Connect our low-level cascade handler. */ - irq_set_handler(cascade_irq, mpc85xx_8259_cascade_handler); - - return 0; -} -machine_device_initcall(mpc85xx_cds, mpc85xx_cds_8259_attach); - -#endif /* CONFIG_PPC_I8259 */ - -static void __init mpc85xx_cds_pci_assign_primary(void) -{ -#ifdef CONFIG_PCI - struct device_node *np; - - if (fsl_pci_primary) - return; - - /* - * MPC85xx_CDS has ISA bridge but unfortunately there is no - * isa node in device tree. We now looking for i8259 node as - * a workaround for such a broken device tree. This routine - * is for complying to all device trees. 
- */ - np = of_find_node_by_name(NULL, "i8259"); - while ((fsl_pci_primary = of_get_parent(np))) { - of_node_put(np); - np = fsl_pci_primary; - - if ((of_device_is_compatible(np, "fsl,mpc8540-pci") || - of_device_is_compatible(np, "fsl,mpc8548-pcie")) && - of_device_is_available(np)) - return; - } -#endif -} - -/* - * Setup the architecture - */ -static void __init mpc85xx_cds_setup_arch(void) -{ - struct device_node *np; - int cds_pci_slot; - - if (ppc_md.progress) - ppc_md.progress("mpc85xx_cds_setup_arch()", 0); - - np = of_find_compatible_node(NULL, NULL, "fsl,mpc8548cds-fpga"); - if (!np) { - pr_err("Could not find FPGA node.\n"); - return; - } - - cadmus = of_iomap(np, 0); - of_node_put(np); - if (!cadmus) { - pr_err("Fail to map FPGA area.\n"); - return; - } - - if (ppc_md.progress) { - char buf[40]; - cds_pci_slot = ((in_8(&cadmus->cm_csr) >> 6) & 0x3) + 1; - snprintf(buf, 40, "CDS Version = 0x%x in slot %d\n", - in_8(&cadmus->cm_ver), cds_pci_slot); - ppc_md.progress(buf, 0); - } - -#ifdef CONFIG_PCI - ppc_md.pci_irq_fixup = mpc85xx_cds_pci_irq_fixup; - ppc_md.pci_exclude_device = mpc85xx_exclude_device; -#endif - - mpc85xx_cds_pci_assign_primary(); - fsl_pci_assign_primary(); -} - -static void mpc85xx_cds_show_cpuinfo(struct seq_file *m) -{ - uint pvid, svid, phid1; - - pvid = mfspr(SPRN_PVR); - svid = mfspr(SPRN_SVR); - - seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n"); - seq_printf(m, "Machine\t\t: MPC85xx CDS (0x%x)\n", - in_8(&cadmus->cm_ver)); - seq_printf(m, "PVR\t\t: 0x%x\n", pvid); - seq_printf(m, "SVR\t\t: 0x%x\n", svid); - - /* Display cpu Pll setting */ - phid1 = mfspr(SPRN_HID1); - seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); -} - - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init mpc85xx_cds_probe(void) -{ - return of_machine_is_compatible("MPC85xxCDS"); -} - -machine_arch_initcall(mpc85xx_cds, mpc85xx_common_publish_devices); - -define_machine(mpc85xx_cds) { - .name = "MPC85xx CDS", - .probe = mpc85xx_cds_probe, - .setup_arch = mpc85xx_cds_setup_arch, - .init_IRQ = mpc85xx_cds_pic_init, - .show_cpuinfo = mpc85xx_cds_show_cpuinfo, - .get_irq = mpic_get_irq, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = mpc85xx_cds_fixup_bus, - .pcibios_fixup_phb = fsl_pcibios_fixup_phb, -#endif - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -}; diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index f8d2c97f39bd..2856148321b3 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c @@ -15,8 +15,8 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> +#include <linux/of.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> @@ -26,6 +26,7 @@ #include <asm/mpic.h> #include <asm/i8259.h> #include <asm/swiotlb.h> +#include <asm/ppc-pci.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> @@ -33,113 +34,22 @@ #include "mpc85xx.h" -#undef DEBUG - -#ifdef DEBUG -#define DBG(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) -#else -#define DBG(fmt, args...) 
-#endif - -#ifdef CONFIG_PPC_I8259 -static void mpc85xx_8259_cascade(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - unsigned int cascade_irq = i8259_irq(); - - if (cascade_irq) { - generic_handle_irq(cascade_irq); - } - chip->irq_eoi(&desc->irq_data); -} -#endif /* CONFIG_PPC_I8259 */ - -void __init mpc85xx_ds_pic_init(void) +static void __init mpc85xx_ds_pic_init(void) { struct mpic *mpic; -#ifdef CONFIG_PPC_I8259 - struct device_node *np; - struct device_node *cascade_node = NULL; - int cascade_irq; -#endif - if (of_machine_is_compatible("fsl,MPC8572DS-CAMP")) { - mpic = mpic_alloc(NULL, 0, - MPIC_NO_RESET | - MPIC_BIG_ENDIAN | - MPIC_SINGLE_DEST_CPU, - 0, 256, " OpenPIC "); - } else { - mpic = mpic_alloc(NULL, 0, - MPIC_BIG_ENDIAN | - MPIC_SINGLE_DEST_CPU, - 0, 256, " OpenPIC "); - } - - BUG_ON(mpic == NULL); - mpic_init(mpic); + int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU; -#ifdef CONFIG_PPC_I8259 - /* Initialize the i8259 controller */ - for_each_node_by_type(np, "interrupt-controller") - if (of_device_is_compatible(np, "chrp,iic")) { - cascade_node = np; - break; - } + if (of_machine_is_compatible("fsl,MPC8572DS-CAMP")) + flags |= MPIC_NO_RESET; - if (cascade_node == NULL) { - printk(KERN_DEBUG "Could not find i8259 PIC\n"); - return; - } + mpic = mpic_alloc(NULL, 0, flags, 0, 256, " OpenPIC "); - cascade_irq = irq_of_parse_and_map(cascade_node, 0); - if (!cascade_irq) { - printk(KERN_ERR "Failed to map cascade interrupt\n"); + if (WARN_ON(!mpic)) return; - } - - DBG("mpc85xxds: cascade mapped to irq %d\n", cascade_irq); - - i8259_init(cascade_node, 0); - of_node_put(cascade_node); - - irq_set_chained_handler(cascade_irq, mpc85xx_8259_cascade); -#endif /* CONFIG_PPC_I8259 */ -} - -#ifdef CONFIG_PCI -extern int uli_exclude_device(struct pci_controller *hose, - u_char bus, u_char devfn); - -static struct device_node *pci_with_uli; -static int mpc85xx_exclude_device(struct pci_controller *hose, - u_char bus, u_char devfn) -{ - if (hose->dn == pci_with_uli) - return uli_exclude_device(hose, bus, devfn); - - return PCIBIOS_SUCCESSFUL; -} -#endif /* CONFIG_PCI */ - -static void __init mpc85xx_ds_uli_init(void) -{ -#ifdef CONFIG_PCI - struct device_node *node; - - /* See if we have a ULI under the primary */ - - node = of_find_node_by_name(NULL, "uli1575"); - while ((pci_with_uli = of_get_parent(node))) { - of_node_put(node); - node = pci_with_uli; + mpic_init(mpic); - if (pci_with_uli == fsl_pci_primary) { - ppc_md.pci_exclude_device = mpc85xx_exclude_device; - break; - } - } -#endif + mpc85xx_8259_init(); } /* @@ -152,43 +62,18 @@ static void __init mpc85xx_ds_setup_arch(void) swiotlb_detect_4g(); fsl_pci_assign_primary(); - mpc85xx_ds_uli_init(); + uli_init(); mpc85xx_smp_init(); - printk("MPC85xx DS board from Freescale Semiconductor\n"); -} - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init mpc8544_ds_probe(void) -{ - return !!of_machine_is_compatible("MPC8544DS"); + pr_info("MPC85xx DS board from Freescale Semiconductor\n"); } machine_arch_initcall(mpc8544_ds, mpc85xx_common_publish_devices); machine_arch_initcall(mpc8572_ds, mpc85xx_common_publish_devices); -machine_arch_initcall(p2020_ds, mpc85xx_common_publish_devices); - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init mpc8572_ds_probe(void) -{ - return !!of_machine_is_compatible("fsl,MPC8572DS"); -} - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init p2020_ds_probe(void) -{ - return 
!!of_machine_is_compatible("fsl,P2020DS"); -} define_machine(mpc8544_ds) { .name = "MPC8544 DS", - .probe = mpc8544_ds_probe, + .compatible = "MPC8544DS", .setup_arch = mpc85xx_ds_setup_arch, .init_IRQ = mpc85xx_ds_pic_init, #ifdef CONFIG_PCI @@ -196,27 +81,12 @@ define_machine(mpc8544_ds) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(mpc8572_ds) { .name = "MPC8572 DS", - .probe = mpc8572_ds_probe, - .setup_arch = mpc85xx_ds_setup_arch, - .init_IRQ = mpc85xx_ds_pic_init, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = fsl_pcibios_fixup_bus, - .pcibios_fixup_phb = fsl_pcibios_fixup_phb, -#endif - .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -}; - -define_machine(p2020_ds) { - .name = "P2020 DS", - .probe = p2020_ds_probe, + .compatible = "fsl,MPC8572DS", .setup_arch = mpc85xx_ds_setup_arch, .init_IRQ = mpc85xx_ds_pic_init, #ifdef CONFIG_PCI @@ -224,6 +94,5 @@ define_machine(p2020_ds) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index 3a2ac410af18..c19490cf6376 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c @@ -26,8 +26,8 @@ #include <linux/seq_file.h> #include <linux/initrd.h> #include <linux/fsl_devices.h> -#include <linux/of_platform.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/of_address.h> #include <linux/phy.h> #include <linux/memblock.h> #include <linux/fsl/guts.h> @@ -49,13 +49,6 @@ #include "mpc85xx.h" -#undef DEBUG -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) 
-#endif - #if IS_BUILTIN(CONFIG_PHYLIB) #define MV88E1111_SCR 0x10 @@ -339,18 +332,12 @@ static void __init mpc85xx_mds_pic_init(void) mpic_init(mpic); } -static int __init mpc85xx_mds_probe(void) -{ - return of_machine_is_compatible("MPC85xxMDS"); -} - define_machine(mpc8568_mds) { .name = "MPC8568 MDS", - .probe = mpc85xx_mds_probe, + .compatible = "MPC85xxMDS", .setup_arch = mpc85xx_mds_setup_arch, .init_IRQ = mpc85xx_mds_pic_init, .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, @@ -358,18 +345,12 @@ define_machine(mpc8568_mds) { #endif }; -static int __init mpc8569_mds_probe(void) -{ - return of_machine_is_compatible("fsl,MPC8569EMDS"); -} - define_machine(mpc8569_mds) { .name = "MPC8569 MDS", - .probe = mpc8569_mds_probe, + .compatible = "fsl,MPC8569EMDS", .setup_arch = mpc85xx_mds_setup_arch, .init_IRQ = mpc85xx_mds_pic_init, .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, @@ -377,19 +358,12 @@ define_machine(mpc8569_mds) { #endif }; -static int __init p1021_mds_probe(void) -{ - return of_machine_is_compatible("fsl,P1021MDS"); - -} - define_machine(p1021_mds) { .name = "P1021 MDS", - .probe = p1021_mds_probe, + .compatible = "fsl,P1021MDS", .setup_arch = mpc85xx_mds_setup_arch, .init_IRQ = mpc85xx_mds_pic_init, .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c index d99aba158235..e0cec670d8db 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c @@ -12,7 +12,8 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> -#include <linux/of_platform.h> +#include <linux/of.h> +#include <linux/of_address.h> #include <linux/fsl/guts.h> #include <asm/time.h> @@ -29,32 +30,19 @@ #include "mpc85xx.h" -#undef DEBUG - -#ifdef DEBUG -#define DBG(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) -#else -#define DBG(fmt, args...) -#endif - - -void __init mpc85xx_rdb_pic_init(void) +static void __init mpc85xx_rdb_pic_init(void) { struct mpic *mpic; + int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU; - if (of_machine_is_compatible("fsl,MPC85XXRDB-CAMP")) { - mpic = mpic_alloc(NULL, 0, MPIC_NO_RESET | - MPIC_BIG_ENDIAN | - MPIC_SINGLE_DEST_CPU, - 0, 256, " OpenPIC "); - } else { - mpic = mpic_alloc(NULL, 0, - MPIC_BIG_ENDIAN | - MPIC_SINGLE_DEST_CPU, - 0, 256, " OpenPIC "); - } + if (of_machine_is_compatible("fsl,MPC85XXRDB-CAMP")) + flags |= MPIC_NO_RESET; + + mpic = mpic_alloc(NULL, 0, flags, 0, 256, " OpenPIC "); + + if (WARN_ON(!mpic)) + return; - BUG_ON(mpic == NULL); mpic_init(mpic); } @@ -70,7 +58,6 @@ static void __init mpc85xx_rdb_setup_arch(void) fsl_pci_assign_primary(); -#ifdef CONFIG_QUICC_ENGINE mpc85xx_qe_par_io_init(); #if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE) if (machine_is(p1025_rdb)) { @@ -89,7 +76,7 @@ static void __init mpc85xx_rdb_setup_arch(void) /* P1025 has pins muxed for QE and other functions. To * enable QE UEC mode, we need to set bit QE0 for UCC1 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 - * and QE12 for QE MII management singals in PMUXCR + * and QE12 for QE MII management signals in PMUXCR * register. 
*/ setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) | @@ -103,13 +90,10 @@ static void __init mpc85xx_rdb_setup_arch(void) } #endif -#endif /* CONFIG_QUICC_ENGINE */ - printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n"); + pr_info("MPC85xx RDB board from Freescale Semiconductor\n"); } -machine_arch_initcall(p2020_rdb, mpc85xx_common_publish_devices); -machine_arch_initcall(p2020_rdb_pc, mpc85xx_common_publish_devices); machine_arch_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices); machine_arch_initcall(p1020_rdb, mpc85xx_common_publish_devices); machine_arch_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices); @@ -119,84 +103,9 @@ machine_arch_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices); machine_arch_initcall(p1025_rdb, mpc85xx_common_publish_devices); machine_arch_initcall(p1024_rdb, mpc85xx_common_publish_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init p2020_rdb_probe(void) -{ - if (of_machine_is_compatible("fsl,P2020RDB")) - return 1; - return 0; -} - -static int __init p1020_rdb_probe(void) -{ - if (of_machine_is_compatible("fsl,P1020RDB")) - return 1; - return 0; -} - -static int __init p1020_rdb_pc_probe(void) -{ - return of_machine_is_compatible("fsl,P1020RDB-PC"); -} - -static int __init p1020_rdb_pd_probe(void) -{ - return of_machine_is_compatible("fsl,P1020RDB-PD"); -} - -static int __init p1021_rdb_pc_probe(void) -{ - if (of_machine_is_compatible("fsl,P1021RDB-PC")) - return 1; - return 0; -} - -static int __init p2020_rdb_pc_probe(void) -{ - if (of_machine_is_compatible("fsl,P2020RDB-PC")) - return 1; - return 0; -} - -static int __init p1025_rdb_probe(void) -{ - return of_machine_is_compatible("fsl,P1025RDB"); -} - -static int __init p1020_mbg_pc_probe(void) -{ - return of_machine_is_compatible("fsl,P1020MBG-PC"); -} - -static int __init p1020_utm_pc_probe(void) -{ - return of_machine_is_compatible("fsl,P1020UTM-PC"); -} - -static int __init p1024_rdb_probe(void) -{ - return of_machine_is_compatible("fsl,P1024RDB"); -} - -define_machine(p2020_rdb) { - .name = "P2020 RDB", - .probe = p2020_rdb_probe, - .setup_arch = mpc85xx_rdb_setup_arch, - .init_IRQ = mpc85xx_rdb_pic_init, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = fsl_pcibios_fixup_bus, - .pcibios_fixup_phb = fsl_pcibios_fixup_phb, -#endif - .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -}; - define_machine(p1020_rdb) { .name = "P1020 RDB", - .probe = p1020_rdb_probe, + .compatible = "fsl,P1020RDB", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI @@ -204,27 +113,12 @@ define_machine(p1020_rdb) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1021_rdb_pc) { .name = "P1021 RDB-PC", - .probe = p1021_rdb_pc_probe, - .setup_arch = mpc85xx_rdb_setup_arch, - .init_IRQ = mpc85xx_rdb_pic_init, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = fsl_pcibios_fixup_bus, - .pcibios_fixup_phb = fsl_pcibios_fixup_phb, -#endif - .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -}; - -define_machine(p2020_rdb_pc) { - .name = "P2020RDB-PC", - .probe = p2020_rdb_pc_probe, + .compatible = "fsl,P1021RDB-PC", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI @@ -232,13 +126,12 @@ define_machine(p2020_rdb_pc) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, 
- .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1025_rdb) { .name = "P1025 RDB", - .probe = p1025_rdb_probe, + .compatible = "fsl,P1025RDB", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI @@ -246,13 +139,12 @@ define_machine(p1025_rdb) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1020_mbg_pc) { .name = "P1020 MBG-PC", - .probe = p1020_mbg_pc_probe, + .compatible = "fsl,P1020MBG-PC", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI @@ -260,13 +152,12 @@ define_machine(p1020_mbg_pc) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1020_utm_pc) { .name = "P1020 UTM-PC", - .probe = p1020_utm_pc_probe, + .compatible = "fsl,P1020UTM-PC", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI @@ -274,13 +165,12 @@ define_machine(p1020_utm_pc) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1020_rdb_pc) { .name = "P1020RDB-PC", - .probe = p1020_rdb_pc_probe, + .compatible = "fsl,P1020RDB-PC", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI @@ -288,13 +178,12 @@ define_machine(p1020_rdb_pc) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1020_rdb_pd) { .name = "P1020RDB-PD", - .probe = p1020_rdb_pd_probe, + .compatible = "fsl,P1020RDB-PD", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI @@ -302,13 +191,12 @@ define_machine(p1020_rdb_pd) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1024_rdb) { .name = "P1024 RDB", - .probe = p1024_rdb_probe, + .compatible = "fsl,P1024RDB", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI @@ -316,6 +204,5 @@ define_machine(p1024_rdb) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/mvme2500.c b/arch/powerpc/platforms/85xx/mvme2500.c index 69d5aa082a4b..19122daadb55 100644 --- a/arch/powerpc/platforms/85xx/mvme2500.c +++ b/arch/powerpc/platforms/85xx/mvme2500.c @@ -21,7 +21,7 @@ #include "mpc85xx.h" -void __init mvme2500_pic_init(void) +static void __init mvme2500_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, @@ -43,17 +43,9 @@ static void __init mvme2500_setup_arch(void) machine_arch_initcall(mvme2500, mpc85xx_common_publish_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init mvme2500_probe(void) -{ - return of_machine_is_compatible("artesyn,MVME2500"); -} - define_machine(mvme2500) { .name = "MVME2500", - .probe = mvme2500_probe, + .compatible = "artesyn,MVME2500", .setup_arch = mvme2500_setup_arch, .init_IRQ = mvme2500_pic_init, #ifdef CONFIG_PCI @@ -61,6 +53,5 @@ define_machine(mvme2500) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = 
mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c index 8ba9306a96b6..491895ac8bcf 100644 --- a/arch/powerpc/platforms/85xx/p1010rdb.c +++ b/arch/powerpc/platforms/85xx/p1010rdb.c @@ -10,7 +10,7 @@ #include <linux/pci.h> #include <linux/delay.h> #include <linux/interrupt.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <asm/time.h> #include <asm/machdep.h> @@ -24,7 +24,7 @@ #include "mpc85xx.h" -void __init p1010_rdb_pic_init(void) +static void __init p1010_rdb_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, @@ -73,6 +73,5 @@ define_machine(p1010_rdb) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c index 537599906146..adc3a2ee1415 100644 --- a/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/arch/powerpc/platforms/85xx/p1022_ds.c @@ -18,8 +18,8 @@ #include <linux/fsl/guts.h> #include <linux/pci.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <asm/div64.h> #include <asm/mpic.h> #include <asm/swiotlb.h> @@ -370,7 +370,7 @@ exit: * * @pixclock: the wavelength, in picoseconds, of the clock */ -void p1022ds_set_pixel_clock(unsigned int pixclock) +static void p1022ds_set_pixel_clock(unsigned int pixclock) { struct device_node *guts_np = NULL; struct ccsr_guts __iomem *guts; @@ -418,7 +418,7 @@ void p1022ds_set_pixel_clock(unsigned int pixclock) /** * p1022ds_valid_monitor_port: set the monitor port for sysfs */ -enum fsl_diu_monitor_port +static enum fsl_diu_monitor_port p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port) { switch (port) { @@ -432,7 +432,7 @@ p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port) #endif -void __init p1022_ds_pic_init(void) +static void __init p1022_ds_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, @@ -549,17 +549,9 @@ static void __init p1022_ds_setup_arch(void) machine_arch_initcall(p1022_ds, mpc85xx_common_publish_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init p1022_ds_probe(void) -{ - return of_machine_is_compatible("fsl,p1022ds"); -} - define_machine(p1022_ds) { .name = "P1022 DS", - .probe = p1022_ds_probe, + .compatible = "fsl,p1022ds", .setup_arch = p1022_ds_setup_arch, .init_IRQ = p1022_ds_pic_init, #ifdef CONFIG_PCI @@ -567,6 +559,5 @@ define_machine(p1022_ds) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c index bc58a99164c9..6198299d95b1 100644 --- a/arch/powerpc/platforms/85xx/p1022_rdk.c +++ b/arch/powerpc/platforms/85xx/p1022_rdk.c @@ -14,8 +14,8 @@ #include <linux/fsl/guts.h> #include <linux/pci.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <asm/div64.h> #include <asm/mpic.h> #include <asm/swiotlb.h> @@ -40,7 +40,7 @@ * * @pixclock: the wavelength, in picoseconds, of the clock */ -void p1022rdk_set_pixel_clock(unsigned int pixclock) +static void p1022rdk_set_pixel_clock(unsigned int pixclock) { struct device_node *guts_np = NULL; struct ccsr_guts __iomem *guts; @@ 
-88,7 +88,7 @@ void p1022rdk_set_pixel_clock(unsigned int pixclock) /** * p1022rdk_valid_monitor_port: set the monitor port for sysfs */ -enum fsl_diu_monitor_port +static enum fsl_diu_monitor_port p1022rdk_valid_monitor_port(enum fsl_diu_monitor_port port) { return FSL_DIU_PORT_DVI; @@ -96,7 +96,7 @@ p1022rdk_valid_monitor_port(enum fsl_diu_monitor_port port) #endif -void __init p1022_rdk_pic_init(void) +static void __init p1022_rdk_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, @@ -129,17 +129,9 @@ static void __init p1022_rdk_setup_arch(void) machine_arch_initcall(p1022_rdk, mpc85xx_common_publish_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init p1022_rdk_probe(void) -{ - return of_machine_is_compatible("fsl,p1022rdk"); -} - define_machine(p1022_rdk) { .name = "P1022 RDK", - .probe = p1022_rdk_probe, + .compatible = "fsl,p1022rdk", .setup_arch = p1022_rdk_setup_arch, .init_IRQ = p1022_rdk_pic_init, #ifdef CONFIG_PCI @@ -147,6 +139,5 @@ define_machine(p1022_rdk) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/p1023_rdb.c b/arch/powerpc/platforms/85xx/p1023_rdb.c index c04868eb2eb1..e4fa8731fd2d 100644 --- a/arch/powerpc/platforms/85xx/p1023_rdb.c +++ b/arch/powerpc/platforms/85xx/p1023_rdb.c @@ -15,9 +15,8 @@ #include <linux/delay.h> #include <linux/module.h> #include <linux/fsl_devices.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_platform.h> -#include <linux/of_device.h> #include <asm/time.h> #include <asm/machdep.h> @@ -37,7 +36,7 @@ * Setup the architecture * */ -static void __init mpc85xx_rdb_setup_arch(void) +static void __init p1023_rdb_setup_arch(void) { struct device_node *np; @@ -83,7 +82,7 @@ static void __init mpc85xx_rdb_setup_arch(void) machine_arch_initcall(p1023_rdb, mpc85xx_common_publish_devices); -static void __init mpc85xx_rdb_pic_init(void) +static void __init p1023_rdb_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, @@ -94,19 +93,12 @@ static void __init mpc85xx_rdb_pic_init(void) mpic_init(mpic); } -static int __init p1023_rdb_probe(void) -{ - return of_machine_is_compatible("fsl,P1023RDB"); - -} - define_machine(p1023_rdb) { .name = "P1023 RDB", - .probe = p1023_rdb_probe, - .setup_arch = mpc85xx_rdb_setup_arch, - .init_IRQ = mpc85xx_rdb_pic_init, + .compatible = "fsl,P1023RDB", + .setup_arch = p1023_rdb_setup_arch, + .init_IRQ = p1023_rdb_pic_init, .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, diff --git a/arch/powerpc/platforms/85xx/p2020.c b/arch/powerpc/platforms/85xx/p2020.c new file mode 100644 index 000000000000..0e4d715145af --- /dev/null +++ b/arch/powerpc/platforms/85xx/p2020.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Freescale P2020 board Setup + * + * Copyright 2007,2009,2012-2013 Freescale Semiconductor Inc. 
+ * Copyright 2022-2023 Pali Rohár <pali@kernel.org> + */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/of.h> + +#include <asm/machdep.h> +#include <asm/udbg.h> +#include <asm/mpic.h> +#include <asm/swiotlb.h> +#include <asm/ppc-pci.h> + +#include <sysdev/fsl_pci.h> + +#include "smp.h" +#include "mpc85xx.h" + +static void __init p2020_pic_init(void) +{ + struct mpic *mpic; + int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU; + + mpic = mpic_alloc(NULL, 0, flags, 0, 256, " OpenPIC "); + + if (WARN_ON(!mpic)) + return; + + mpic_init(mpic); + mpc85xx_8259_init(); +} + +/* + * Setup the architecture + */ +static void __init p2020_setup_arch(void) +{ + swiotlb_detect_4g(); + fsl_pci_assign_primary(); + uli_init(); + mpc85xx_smp_init(); + mpc85xx_qe_par_io_init(); +} + +/* + * Called very early, device-tree isn't unflattened + */ +static int __init p2020_probe(void) +{ + struct device_node *p2020_cpu; + + /* + * There is no common compatible string for all P2020 boards. + * The only common thing is "PowerPC,P2020@0" cpu node. + * So check for P2020 board via this cpu node. + */ + p2020_cpu = of_find_node_by_path("/cpus/PowerPC,P2020@0"); + of_node_put(p2020_cpu); + + return !!p2020_cpu; +} + +machine_arch_initcall(p2020, mpc85xx_common_publish_devices); + +define_machine(p2020) { + .name = "Freescale P2020", + .probe = p2020_probe, + .setup_arch = p2020_setup_arch, + .init_IRQ = p2020_pic_init, +#ifdef CONFIG_PCI + .pcibios_fixup_bus = fsl_pcibios_fixup_bus, + .pcibios_fixup_phb = fsl_pcibios_fixup_phb, +#endif + .get_irq = mpic_get_irq, + .progress = udbg_progress, +}; diff --git a/arch/powerpc/platforms/85xx/ppa8548.c b/arch/powerpc/platforms/85xx/ppa8548.c index 0faf2990bf2c..acd19c52ad43 100644 --- a/arch/powerpc/platforms/85xx/ppa8548.c +++ b/arch/powerpc/platforms/85xx/ppa8548.c @@ -72,21 +72,12 @@ static int __init declare_of_platform_devices(void) } machine_device_initcall(ppa8548, declare_of_platform_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init ppa8548_probe(void) -{ - return of_machine_is_compatible("ppa8548"); -} - define_machine(ppa8548) { .name = "ppa8548", - .probe = ppa8548_probe, + .compatible = "ppa8548", .setup_arch = ppa8548_setup_arch, .init_IRQ = ppa8548_pic_init, .show_cpuinfo = ppa8548_show_cpuinfo, .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c index 64109ad6736c..3cd2f3bd4223 100644 --- a/arch/powerpc/platforms/85xx/qemu_e500.c +++ b/arch/powerpc/platforms/85xx/qemu_e500.c @@ -25,7 +25,7 @@ #include "smp.h" #include "mpc85xx.h" -void __init qemu_e500_pic_init(void) +static void __init qemu_e500_pic_init(void) { struct mpic *mpic; unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU | @@ -46,19 +46,11 @@ static void __init qemu_e500_setup_arch(void) mpc85xx_smp_init(); } -/* - * Called very early, device-tree isn't unflattened - */ -static int __init qemu_e500_probe(void) -{ - return !!of_machine_is_compatible("fsl,qemu-e500"); -} - machine_arch_initcall(qemu_e500, mpc85xx_common_publish_devices); define_machine(qemu_e500) { .name = "QEMU e500", - .probe = qemu_e500_probe, + .compatible = "fsl,qemu-e500", .setup_arch = qemu_e500_setup_arch, .init_IRQ = qemu_e500_pic_init, #ifdef CONFIG_PCI @@ -66,11 +58,6 @@ define_machine(qemu_e500) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_coreint_irq, - .calibrate_decr = 
generic_calibrate_decr, .progress = udbg_progress, -#ifdef CONFIG_PPC64 - .power_save = book3e_idle, -#else .power_save = e500_idle, -#endif }; diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c index 98ae64075193..e635b27ee718 100644 --- a/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c @@ -7,10 +7,13 @@ * Copyright 2012 by Servergy, Inc. */ +#define pr_fmt(fmt) "gpio-halt: " fmt + +#include <linux/err.h> #include <linux/platform_device.h> #include <linux/device.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> -#include <linux/of_gpio.h> #include <linux/of_irq.h> #include <linux/workqueue.h> #include <linux/reboot.h> @@ -18,7 +21,8 @@ #include <asm/machdep.h> -static struct device_node *halt_node; +static struct gpio_desc *halt_gpio; +static int halt_irq; static const struct of_device_id child_match[] = { { @@ -36,23 +40,10 @@ static DECLARE_WORK(gpio_halt_wq, gpio_halt_wfn); static void __noreturn gpio_halt_cb(void) { - enum of_gpio_flags flags; - int trigger, gpio; - - if (!halt_node) - panic("No reset GPIO information was provided in DT\n"); - - gpio = of_get_gpio_flags(halt_node, 0, &flags); - - if (!gpio_is_valid(gpio)) - panic("Provided GPIO is invalid\n"); - - trigger = (flags == OF_GPIO_ACTIVE_LOW); - - printk(KERN_INFO "gpio-halt: triggering GPIO.\n"); + pr_info("triggering GPIO.\n"); /* Probably wont return */ - gpio_set_value(gpio, trigger); + gpiod_set_value(halt_gpio, 1); panic("Halt failed\n"); } @@ -61,58 +52,37 @@ static void __noreturn gpio_halt_cb(void) * to handle the shutdown/poweroff. */ static irqreturn_t gpio_halt_irq(int irq, void *__data) { - printk(KERN_INFO "gpio-halt: shutdown due to power button IRQ.\n"); + struct platform_device *pdev = __data; + + dev_info(&pdev->dev, "scheduling shutdown due to power button IRQ\n"); schedule_work(&gpio_halt_wq); return IRQ_HANDLED; }; -static int gpio_halt_probe(struct platform_device *pdev) +static int __gpio_halt_probe(struct platform_device *pdev, + struct device_node *halt_node) { - enum of_gpio_flags flags; - struct device_node *node = pdev->dev.of_node; - int gpio, err, irq; - int trigger; - - if (!node) - return -ENODEV; - - /* If there's no matching child, this isn't really an error */ - halt_node = of_find_matching_node(node, child_match); - if (!halt_node) - return 0; - - /* Technically we could just read the first one, but punish - * DT writers for invalid form. */ - if (of_gpio_count(halt_node) != 1) - return -EINVAL; + int err; - /* Get the gpio number relative to the dynamic base. 
*/ - gpio = of_get_gpio_flags(halt_node, 0, &flags); - if (!gpio_is_valid(gpio)) - return -EINVAL; - - err = gpio_request(gpio, "gpio-halt"); + halt_gpio = fwnode_gpiod_get_index(of_fwnode_handle(halt_node), + NULL, 0, GPIOD_OUT_LOW, "gpio-halt"); + err = PTR_ERR_OR_ZERO(halt_gpio); if (err) { - printk(KERN_ERR "gpio-halt: error requesting GPIO %d.\n", - gpio); - halt_node = NULL; + dev_err(&pdev->dev, "failed to request halt GPIO: %d\n", err); return err; } - trigger = (flags == OF_GPIO_ACTIVE_LOW); - - gpio_direction_output(gpio, !trigger); - /* Now get the IRQ which tells us when the power button is hit */ - irq = irq_of_parse_and_map(halt_node, 0); - err = request_irq(irq, gpio_halt_irq, IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING, "gpio-halt", halt_node); + halt_irq = irq_of_parse_and_map(halt_node, 0); + err = request_irq(halt_irq, gpio_halt_irq, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + "gpio-halt", pdev); if (err) { - printk(KERN_ERR "gpio-halt: error requesting IRQ %d for " - "GPIO %d.\n", irq, gpio); - gpio_free(gpio); - halt_node = NULL; + dev_err(&pdev->dev, "failed to request IRQ %d: %d\n", + halt_irq, err); + gpiod_put(halt_gpio); + halt_gpio = NULL; return err; } @@ -120,29 +90,40 @@ static int gpio_halt_probe(struct platform_device *pdev) ppc_md.halt = gpio_halt_cb; pm_power_off = gpio_halt_cb; - printk(KERN_INFO "gpio-halt: registered GPIO %d (%d trigger, %d" - " irq).\n", gpio, trigger, irq); + dev_info(&pdev->dev, "registered halt GPIO, irq: %d\n", halt_irq); return 0; } -static int gpio_halt_remove(struct platform_device *pdev) +static int gpio_halt_probe(struct platform_device *pdev) { - if (halt_node) { - int gpio = of_get_gpio(halt_node, 0); - int irq = irq_of_parse_and_map(halt_node, 0); + struct device_node *halt_node; + int ret; + + if (!pdev->dev.of_node) + return -ENODEV; + + /* If there's no matching child, this isn't really an error */ + halt_node = of_find_matching_node(pdev->dev.of_node, child_match); + if (!halt_node) + return -ENODEV; - free_irq(irq, halt_node); + ret = __gpio_halt_probe(pdev, halt_node); + of_node_put(halt_node); - ppc_md.halt = NULL; - pm_power_off = NULL; + return ret; +} - gpio_free(gpio); +static void gpio_halt_remove(struct platform_device *pdev) +{ + free_irq(halt_irq, pdev); + cancel_work_sync(&gpio_halt_wq); - halt_node = NULL; - } + ppc_md.halt = NULL; + pm_power_off = NULL; - return 0; + gpiod_put(halt_gpio); + halt_gpio = NULL; } static const struct of_device_id gpio_halt_match[] = { diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 9c43cf32f4c9..32fa5fb557c0 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -23,7 +23,7 @@ #include <asm/mpic.h> #include <asm/cacheflush.h> #include <asm/dbell.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/cputhreads.h> #include <asm/fsl_pm.h> @@ -180,7 +180,7 @@ static void wake_hw_thread(void *info) unsigned long inia; int cpu = *(const int *)info; - inia = *(unsigned long *)fsl_secondary_thread_init; + inia = ppc_function_entry(fsl_secondary_thread_init); book3e_start_thread(cpu_thread_in_core(cpu), inia); } #endif @@ -398,6 +398,7 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary) hard_irq_disable(); mpic_teardown_this_cpu(secondary); +#ifdef CONFIG_CRASH_DUMP if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) { /* * We enter the crash kernel on whatever cpu crashed, @@ -406,9 +407,11 @@ static void mpc85xx_smp_kexec_cpu_down(int 
crash_shutdown, int secondary) */ disable_threadbit = 1; disable_cpu = cpu_first_thread_sibling(cpu); - } else if (sibling != crashing_cpu && - cpu_thread_in_core(cpu) == 0 && - cpu_thread_in_core(sibling) != 0) { + } else if (sibling == crashing_cpu) { + return; + } +#endif + if (cpu_thread_in_core(cpu) == 0 && cpu_thread_in_core(sibling) != 0) { disable_threadbit = 2; disable_cpu = sibling; } diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c index 09f64470c765..403367b318db 100644 --- a/arch/powerpc/platforms/85xx/socrates.c +++ b/arch/powerpc/platforms/85xx/socrates.c @@ -23,7 +23,7 @@ #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <asm/time.h> #include <asm/machdep.h> @@ -69,23 +69,11 @@ static void __init socrates_setup_arch(void) machine_arch_initcall(socrates, mpc85xx_common_publish_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init socrates_probe(void) -{ - if (of_machine_is_compatible("abb,socrates")) - return 1; - - return 0; -} - define_machine(socrates) { .name = "Socrates", - .probe = socrates_probe, + .compatible = "abb,socrates", .setup_arch = socrates_setup_arch, .init_IRQ = socrates_pic_init, .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index 3768c86b9629..4b69fb321a68 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c @@ -6,9 +6,10 @@ #include <linux/irq.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> #include <linux/io.h> +#include "socrates_fpga_pic.h" + /* * The FPGA supports 9 interrupt sources, which can be routed to 3 * interrupt request lines of the MPIC. 
The line to be used can be @@ -82,7 +83,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq) if (cause >> (i + 16)) break; } - return irq_linear_revmap(socrates_fpga_pic_irq_host, + return irq_find_mapping(socrates_fpga_pic_irq_host, (irq_hw_number_t)i); } @@ -277,7 +278,7 @@ void __init socrates_fpga_pic_init(struct device_node *pic) int i; /* Setup an irq_domain structure */ - socrates_fpga_pic_irq_host = irq_domain_add_linear(pic, + socrates_fpga_pic_irq_host = irq_domain_create_linear(of_fwnode_handle(pic), SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL); if (socrates_fpga_pic_irq_host == NULL) { pr_err("FPGA PIC: Unable to allocate host\n"); diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c index 6b1fe7bb3a8c..c10efc45894c 100644 --- a/arch/powerpc/platforms/85xx/stx_gp3.c +++ b/arch/powerpc/platforms/85xx/stx_gp3.c @@ -22,7 +22,7 @@ #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <asm/time.h> #include <asm/machdep.h> @@ -83,21 +83,12 @@ static void stx_gp3_show_cpuinfo(struct seq_file *m) machine_arch_initcall(stx_gp3, mpc85xx_common_publish_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init stx_gp3_probe(void) -{ - return of_machine_is_compatible("stx,gp3-8560"); -} - define_machine(stx_gp3) { .name = "STX GP3", - .probe = stx_gp3_probe, + .compatible = "stx,gp3-8560", .setup_arch = stx_gp3_setup_arch, .init_IRQ = stx_gp3_pic_init, .show_cpuinfo = stx_gp3_show_cpuinfo, .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c index 767eed98a0a8..d4fbb6eff38a 100644 --- a/arch/powerpc/platforms/85xx/t1042rdb_diu.c +++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c @@ -149,4 +149,5 @@ static int __init t1042rdb_diu_init(void) early_initcall(t1042rdb_diu_init); +MODULE_DESCRIPTION("Freescale T1042 DIU driver"); MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c index d187f4b8bff6..f74d446c53f0 100644 --- a/arch/powerpc/platforms/85xx/tqm85xx.c +++ b/arch/powerpc/platforms/85xx/tqm85xx.c @@ -20,7 +20,7 @@ #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <asm/time.h> #include <asm/machdep.h> @@ -112,21 +112,12 @@ static const char * const board[] __initconst = { NULL }; -/* - * Called very early, device-tree isn't unflattened - */ -static int __init tqm85xx_probe(void) -{ - return of_device_compatible_match(of_root, board); -} - define_machine(tqm85xx) { .name = "TQM85xx", - .probe = tqm85xx_probe, + .compatibles = board, .setup_arch = tqm85xx_setup_arch, .init_IRQ = tqm85xx_pic_init, .show_cpuinfo = tqm85xx_show_cpuinfo, .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c index eaec099b4077..c0a0456f1674 100644 --- a/arch/powerpc/platforms/85xx/twr_p102x.c +++ b/arch/powerpc/platforms/85xx/twr_p102x.c @@ -13,7 +13,8 @@ #include <linux/errno.h> #include <linux/fsl/guts.h> #include <linux/pci.h> -#include <linux/of_platform.h> +#include <linux/of.h> +#include <linux/of_address.h> #include <asm/pci-bridge.h> #include <asm/udbg.h> @@ -103,20 +104,14 @@ 
static void __init twr_p1025_setup_arch(void) machine_arch_initcall(twr_p1025, mpc85xx_common_publish_devices); -static int __init twr_p1025_probe(void) -{ - return of_machine_is_compatible("fsl,TWR-P1025"); -} - define_machine(twr_p1025) { .name = "TWR-P1025", - .probe = twr_p1025_probe, + .compatible = "fsl,TWR-P1025", .setup_arch = twr_p1025_setup_arch, .init_IRQ = twr_p1025_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c index 5836e4ecb7a0..2582427d8d01 100644 --- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c +++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c @@ -16,8 +16,8 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> @@ -37,7 +37,7 @@ #define MPC85xx_L2CTL_L2I 0x40000000 /* L2 flash invalidate */ #define MPC85xx_L2CTL_L2SIZ_MASK 0x30000000 /* L2 SRAM size (R/O) */ -void __init xes_mpc85xx_pic_init(void) +static void __init xes_mpc85xx_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); @@ -136,27 +136,9 @@ machine_arch_initcall(xes_mpc8572, mpc85xx_common_publish_devices); machine_arch_initcall(xes_mpc8548, mpc85xx_common_publish_devices); machine_arch_initcall(xes_mpc8540, mpc85xx_common_publish_devices); -/* - * Called very early, device-tree isn't unflattened - */ -static int __init xes_mpc8572_probe(void) -{ - return of_machine_is_compatible("xes,MPC8572"); -} - -static int __init xes_mpc8548_probe(void) -{ - return of_machine_is_compatible("xes,MPC8548"); -} - -static int __init xes_mpc8540_probe(void) -{ - return of_machine_is_compatible("xes,MPC8540"); -} - define_machine(xes_mpc8572) { .name = "X-ES MPC8572", - .probe = xes_mpc8572_probe, + .compatible = "xes,MPC8572", .setup_arch = xes_mpc85xx_setup_arch, .init_IRQ = xes_mpc85xx_pic_init, #ifdef CONFIG_PCI @@ -164,13 +146,12 @@ define_machine(xes_mpc8572) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(xes_mpc8548) { .name = "X-ES MPC8548", - .probe = xes_mpc8548_probe, + .compatible = "xes,MPC8548", .setup_arch = xes_mpc85xx_setup_arch, .init_IRQ = xes_mpc85xx_pic_init, #ifdef CONFIG_PCI @@ -178,13 +159,12 @@ define_machine(xes_mpc8548) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(xes_mpc8540) { .name = "X-ES MPC8540", - .probe = xes_mpc8540_probe, + .compatible = "xes,MPC8540", .setup_arch = xes_mpc85xx_setup_arch, .init_IRQ = xes_mpc85xx_pic_init, #ifdef CONFIG_PCI @@ -192,6 +172,5 @@ define_machine(xes_mpc8540) { .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig index be867abebc83..06b1e5c49d6f 100644 --- a/arch/powerpc/platforms/86xx/Kconfig +++ b/arch/powerpc/platforms/86xx/Kconfig @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -config PPC_86xx menuconfig PPC_86xx bool "86xx-based boards" depends on PPC_BOOK3S_32 @@ -10,23 +9,6 @@ menuconfig PPC_86xx if PPC_86xx 
-config MPC8641_HPCN - bool "Freescale MPC8641 HPCN" - select PPC_I8259 - select DEFAULT_UIMAGE - select FSL_ULI1575 if PCI - select HAVE_RAPIDIO - select SWIOTLB - help - This option enables support for the MPC8641 HPCN board. - -config MPC8610_HPCD - bool "Freescale MPC8610 HPCD" - select DEFAULT_UIMAGE - select FSL_ULI1575 if PCI - help - This option enables support for the MPC8610 HPCD board. - config GEF_PPC9A bool "GE PPC9A" select DEFAULT_UIMAGE @@ -68,13 +50,5 @@ config MPC8641 select FSL_PCI if PCI select PPC_UDBG_16550 select MPIC - default y if MPC8641_HPCN || GEF_SBC610 || GEF_SBC310 || GEF_PPC9A \ + default y if GEF_SBC610 || GEF_SBC310 || GEF_PPC9A \ || MVME7100 - -config MPC8610 - bool - select HAVE_PCI - select FSL_PCI if PCI - select PPC_UDBG_16550 - select MPIC - default y if MPC8610_HPCD diff --git a/arch/powerpc/platforms/86xx/Makefile b/arch/powerpc/platforms/86xx/Makefile index 5bbe1475bf26..dafbc037ff42 100644 --- a/arch/powerpc/platforms/86xx/Makefile +++ b/arch/powerpc/platforms/86xx/Makefile @@ -5,8 +5,6 @@ obj-y := pic.o common.o obj-$(CONFIG_SMP) += mpc86xx_smp.o -obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o -obj-$(CONFIG_MPC8610_HPCD) += mpc8610_hpcd.o obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o obj-$(CONFIG_GEF_SBC310) += gef_sbc310.o obj-$(CONFIG_GEF_PPC9A) += gef_ppc9a.o diff --git a/arch/powerpc/platforms/86xx/common.c b/arch/powerpc/platforms/86xx/common.c index 0069d38263e7..a4a550527609 100644 --- a/arch/powerpc/platforms/86xx/common.c +++ b/arch/powerpc/platforms/86xx/common.c @@ -3,7 +3,10 @@ * Routines common to most mpc86xx-based boards. */ +#include <linux/init.h> +#include <linux/mod_devicetable.h> #include <linux/of_platform.h> +#include <asm/reg.h> #include <asm/synch.h> #include "mpc86xx.h" diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c index 8e358fa0bc41..f7f98cca7b91 100644 --- a/arch/powerpc/platforms/86xx/gef_ppc9a.c +++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c @@ -18,8 +18,8 @@ #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> @@ -175,33 +175,16 @@ static void gef_ppc9a_nec_fixup(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB, gef_ppc9a_nec_fixup); -/* - * Called very early, device-tree isn't unflattened - * - * This function is called to determine whether the BSP is compatible with the - * supplied device-tree, which is assumed to be the correct one for the actual - * board. It is expected that, in the future, a kernel may support multiple - * boards. 
- */ -static int __init gef_ppc9a_probe(void) -{ - if (of_machine_is_compatible("gef,ppc9a")) - return 1; - - return 0; -} - machine_arch_initcall(gef_ppc9a, mpc86xx_common_publish_devices); define_machine(gef_ppc9a) { .name = "GE PPC9A", - .probe = gef_ppc9a_probe, + .compatible = "gef,ppc9a", .setup_arch = gef_ppc9a_setup_arch, .init_IRQ = gef_ppc9a_init_irq, .show_cpuinfo = gef_ppc9a_show_cpuinfo, .get_irq = mpic_get_irq, .time_init = mpc86xx_time_init, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c index b5b2733567cb..689835f7f088 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc310.c +++ b/arch/powerpc/platforms/86xx/gef_sbc310.c @@ -18,8 +18,8 @@ #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> @@ -162,33 +162,16 @@ static void gef_sbc310_nec_fixup(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB, gef_sbc310_nec_fixup); -/* - * Called very early, device-tree isn't unflattened - * - * This function is called to determine whether the BSP is compatible with the - * supplied device-tree, which is assumed to be the correct one for the actual - * board. It is expected that, in the future, a kernel may support multiple - * boards. - */ -static int __init gef_sbc310_probe(void) -{ - if (of_machine_is_compatible("gef,sbc310")) - return 1; - - return 0; -} - machine_arch_initcall(gef_sbc310, mpc86xx_common_publish_devices); define_machine(gef_sbc310) { .name = "GE SBC310", - .probe = gef_sbc310_probe, + .compatible = "gef,sbc310", .setup_arch = gef_sbc310_setup_arch, .init_IRQ = gef_sbc310_init_irq, .show_cpuinfo = gef_sbc310_show_cpuinfo, .get_irq = mpic_get_irq, .time_init = mpc86xx_time_init, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c index bb4c8e6b44d0..365f511186ca 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc610.c +++ b/arch/powerpc/platforms/86xx/gef_sbc610.c @@ -18,8 +18,8 @@ #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> @@ -152,33 +152,16 @@ static void gef_sbc610_nec_fixup(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB, gef_sbc610_nec_fixup); -/* - * Called very early, device-tree isn't unflattened - * - * This function is called to determine whether the BSP is compatible with the - * supplied device-tree, which is assumed to be the correct one for the actual - * board. It is expected that, in the future, a kernel may support multiple - * boards. 
- */ -static int __init gef_sbc610_probe(void) -{ - if (of_machine_is_compatible("gef,sbc610")) - return 1; - - return 0; -} - machine_arch_initcall(gef_sbc610, mpc86xx_common_publish_devices); define_machine(gef_sbc610) { .name = "GE SBC610", - .probe = gef_sbc610_probe, + .compatible = "gef,sbc610", .setup_arch = gef_sbc610_setup_arch, .init_IRQ = gef_sbc610_init_irq, .show_cpuinfo = gef_sbc610_show_cpuinfo, .get_irq = mpic_get_irq, .time_init = mpc86xx_time_init, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c deleted file mode 100644 index b593b9afd30a..000000000000 --- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +++ /dev/null @@ -1,333 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * MPC8610 HPCD board specific routines - * - * Initial author: Xianghua Xiao <x.xiao@freescale.com> - * Recode: Jason Jin <jason.jin@freescale.com> - * York Sun <yorksun@freescale.com> - * - * Rewrite the interrupt routing. remove the 8259PIC support, - * All the integrated device in ULI use sideband interrupt. - * - * Copyright 2008 Freescale Semiconductor Inc. - */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/interrupt.h> -#include <linux/kdev_t.h> -#include <linux/delay.h> -#include <linux/seq_file.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/fsl/guts.h> - -#include <asm/time.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <mm/mmu_decl.h> -#include <asm/udbg.h> - -#include <asm/mpic.h> - -#include <linux/of_platform.h> -#include <sysdev/fsl_pci.h> -#include <sysdev/fsl_soc.h> - -#include "mpc86xx.h" - -static struct device_node *pixis_node; -static unsigned char *pixis_bdcfg0, *pixis_arch; - -/* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */ -#define CLKDVDR_PXCKEN 0x80000000 -#define CLKDVDR_PXCKINV 0x10000000 -#define CLKDVDR_PXCKDLY 0x06000000 -#define CLKDVDR_PXCLK_MASK 0x001F0000 - -#ifdef CONFIG_SUSPEND -static irqreturn_t mpc8610_sw9_irq(int irq, void *data) -{ - pr_debug("%s: PIXIS' event (sw9/wakeup) IRQ handled\n", __func__); - return IRQ_HANDLED; -} - -static void __init mpc8610_suspend_init(void) -{ - int irq; - int ret; - - if (!pixis_node) - return; - - irq = irq_of_parse_and_map(pixis_node, 0); - if (!irq) { - pr_err("%s: can't map pixis event IRQ.\n", __func__); - return; - } - - ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9:wakeup", NULL); - if (ret) { - pr_err("%s: can't request pixis event IRQ: %d\n", - __func__, ret); - irq_dispose_mapping(irq); - } - - enable_irq_wake(irq); -} -#else -static inline void mpc8610_suspend_init(void) { } -#endif /* CONFIG_SUSPEND */ - -static const struct of_device_id mpc8610_ids[] __initconst = { - { .compatible = "fsl,mpc8610-immr", }, - { .compatible = "fsl,mpc8610-guts", }, - /* So that the DMA channel nodes can be probed individually: */ - { .compatible = "fsl,eloplus-dma", }, - /* PCI controllers */ - { .compatible = "fsl,mpc8610-pci", }, - {} -}; - -static int __init mpc8610_declare_of_platform_devices(void) -{ - /* Enable wakeup on PIXIS' event IRQ. */ - mpc8610_suspend_init(); - - mpc86xx_common_publish_devices(); - - /* Without this call, the SSI device driver won't get probed. 
*/ - of_platform_bus_probe(NULL, mpc8610_ids, NULL); - - return 0; -} -machine_arch_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices); - -#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) - -/* - * DIU Area Descriptor - * - * The MPC8610 reference manual shows the bits of the AD register in - * little-endian order, which causes the BLUE_C field to be split into two - * parts. To simplify the definition of the MAKE_AD() macro, we define the - * fields in big-endian order and byte-swap the result. - * - * So even though the registers don't look like they're in the - * same bit positions as they are on the P1022, the same value is written to - * the AD register on the MPC8610 and on the P1022. - */ -#define AD_BYTE_F 0x10000000 -#define AD_ALPHA_C_MASK 0x0E000000 -#define AD_ALPHA_C_SHIFT 25 -#define AD_BLUE_C_MASK 0x01800000 -#define AD_BLUE_C_SHIFT 23 -#define AD_GREEN_C_MASK 0x00600000 -#define AD_GREEN_C_SHIFT 21 -#define AD_RED_C_MASK 0x00180000 -#define AD_RED_C_SHIFT 19 -#define AD_PALETTE 0x00040000 -#define AD_PIXEL_S_MASK 0x00030000 -#define AD_PIXEL_S_SHIFT 16 -#define AD_COMP_3_MASK 0x0000F000 -#define AD_COMP_3_SHIFT 12 -#define AD_COMP_2_MASK 0x00000F00 -#define AD_COMP_2_SHIFT 8 -#define AD_COMP_1_MASK 0x000000F0 -#define AD_COMP_1_SHIFT 4 -#define AD_COMP_0_MASK 0x0000000F -#define AD_COMP_0_SHIFT 0 - -#define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \ - cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \ - (blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \ - (red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \ - (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \ - (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT)) - -u32 mpc8610hpcd_get_pixel_format(enum fsl_diu_monitor_port port, - unsigned int bits_per_pixel) -{ - static const u32 pixelformat[][3] = { - { - MAKE_AD(3, 0, 2, 1, 3, 8, 8, 8, 8), - MAKE_AD(4, 2, 0, 1, 2, 8, 8, 8, 0), - MAKE_AD(4, 0, 2, 1, 1, 5, 6, 5, 0) - }, - { - MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8), - MAKE_AD(4, 0, 2, 1, 2, 8, 8, 8, 0), - MAKE_AD(4, 2, 0, 1, 1, 5, 6, 5, 0) - }, - }; - unsigned int arch_monitor; - - /* The DVI port is mis-wired on revision 1 of this board. */ - arch_monitor = - ((*pixis_arch == 0x01) && (port == FSL_DIU_PORT_DVI)) ? 
0 : 1; - - switch (bits_per_pixel) { - case 32: - return pixelformat[arch_monitor][0]; - case 24: - return pixelformat[arch_monitor][1]; - case 16: - return pixelformat[arch_monitor][2]; - default: - pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel); - return 0; - } -} - -void mpc8610hpcd_set_gamma_table(enum fsl_diu_monitor_port port, - char *gamma_table_base) -{ - int i; - if (port == FSL_DIU_PORT_DLVDS) { - for (i = 0; i < 256*3; i++) - gamma_table_base[i] = (gamma_table_base[i] << 2) | - ((gamma_table_base[i] >> 6) & 0x03); - } -} - -#define PX_BRDCFG0_DVISEL (1 << 3) -#define PX_BRDCFG0_DLINK (1 << 4) -#define PX_BRDCFG0_DIU_MASK (PX_BRDCFG0_DVISEL | PX_BRDCFG0_DLINK) - -void mpc8610hpcd_set_monitor_port(enum fsl_diu_monitor_port port) -{ - switch (port) { - case FSL_DIU_PORT_DVI: - clrsetbits_8(pixis_bdcfg0, PX_BRDCFG0_DIU_MASK, - PX_BRDCFG0_DVISEL | PX_BRDCFG0_DLINK); - break; - case FSL_DIU_PORT_LVDS: - clrsetbits_8(pixis_bdcfg0, PX_BRDCFG0_DIU_MASK, - PX_BRDCFG0_DLINK); - break; - case FSL_DIU_PORT_DLVDS: - clrbits8(pixis_bdcfg0, PX_BRDCFG0_DIU_MASK); - break; - } -} - -/** - * mpc8610hpcd_set_pixel_clock: program the DIU's clock - * - * @pixclock: the wavelength, in picoseconds, of the clock - */ -void mpc8610hpcd_set_pixel_clock(unsigned int pixclock) -{ - struct device_node *guts_np = NULL; - struct ccsr_guts __iomem *guts; - unsigned long freq; - u64 temp; - u32 pxclk; - - /* Map the global utilities registers. */ - guts_np = of_find_compatible_node(NULL, NULL, "fsl,mpc8610-guts"); - if (!guts_np) { - pr_err("mpc8610hpcd: missing global utilities device node\n"); - return; - } - - guts = of_iomap(guts_np, 0); - of_node_put(guts_np); - if (!guts) { - pr_err("mpc8610hpcd: could not map global utilities device\n"); - return; - } - - /* Convert pixclock from a wavelength to a frequency */ - temp = 1000000000000ULL; - do_div(temp, pixclock); - freq = temp; - - /* - * 'pxclk' is the ratio of the platform clock to the pixel clock. - * On the MPC8610, the value programmed into CLKDVDR is the ratio - * minus one. The valid range of values is 2-31. 
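 *
 * [Editorial worked example, not part of the original comment: with an
 *  assumed 400 MHz platform clock and pixclock = 40000 ps, the pixel clock
 *  is 10^12 / 40000 = 25 MHz, the ratio is 400 / 25 = 16, and 16 - 1 = 15
 *  is the value written to the CLKDVDR pixel-clock field, comfortably
 *  inside the valid 2-31 range. The 400 MHz figure is only for
 *  illustration; the real value comes from fsl_get_sys_freq().]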
- */ - pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq) - 1; - pxclk = clamp_t(u32, pxclk, 2, 31); - - /* Disable the pixel clock, and set it to non-inverted and no delay */ - clrbits32(&guts->clkdvdr, - CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK); - - /* Enable the clock and set the pxclk */ - setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16)); - - iounmap(guts); -} - -enum fsl_diu_monitor_port -mpc8610hpcd_valid_monitor_port(enum fsl_diu_monitor_port port) -{ - return port; -} - -#endif - -static void __init mpc86xx_hpcd_setup_arch(void) -{ - struct resource r; - unsigned char *pixis; - - if (ppc_md.progress) - ppc_md.progress("mpc86xx_hpcd_setup_arch()", 0); - - fsl_pci_assign_primary(); - -#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) - diu_ops.get_pixel_format = mpc8610hpcd_get_pixel_format; - diu_ops.set_gamma_table = mpc8610hpcd_set_gamma_table; - diu_ops.set_monitor_port = mpc8610hpcd_set_monitor_port; - diu_ops.set_pixel_clock = mpc8610hpcd_set_pixel_clock; - diu_ops.valid_monitor_port = mpc8610hpcd_valid_monitor_port; -#endif - - pixis_node = of_find_compatible_node(NULL, NULL, "fsl,fpga-pixis"); - if (pixis_node) { - of_address_to_resource(pixis_node, 0, &r); - of_node_put(pixis_node); - pixis = ioremap(r.start, 32); - if (!pixis) { - printk(KERN_ERR "Err: can't map FPGA cfg register!\n"); - return; - } - pixis_bdcfg0 = pixis + 8; - pixis_arch = pixis + 1; - } else - printk(KERN_ERR "Err: " - "can't find device node 'fsl,fpga-pixis'\n"); - - printk("MPC86xx HPCD board from Freescale Semiconductor\n"); -} - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init mpc86xx_hpcd_probe(void) -{ - if (of_machine_is_compatible("fsl,MPC8610HPCD")) - return 1; /* Looks good */ - - return 0; -} - -define_machine(mpc86xx_hpcd) { - .name = "MPC86xx HPCD", - .probe = mpc86xx_hpcd_probe, - .setup_arch = mpc86xx_hpcd_setup_arch, - .init_IRQ = mpc86xx_init_irq, - .get_irq = mpic_get_irq, - .time_init = mpc86xx_time_init, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = fsl_pcibios_fixup_bus, -#endif -}; diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c deleted file mode 100644 index 5294394c9c07..000000000000 --- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c +++ /dev/null @@ -1,127 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * MPC86xx HPCN board specific routines - * - * Recode: ZHANG WEI <wei.zhang@freescale.com> - * Initial author: Xianghua Xiao <x.xiao@freescale.com> - * - * Copyright 2006 Freescale Semiconductor Inc. - */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/delay.h> -#include <linux/seq_file.h> -#include <linux/of_platform.h> - -#include <asm/time.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <mm/mmu_decl.h> -#include <asm/udbg.h> -#include <asm/swiotlb.h> - -#include <asm/mpic.h> - -#include <sysdev/fsl_pci.h> -#include <sysdev/fsl_soc.h> - -#include "mpc86xx.h" - -#undef DEBUG - -#ifdef DEBUG -#define DBG(fmt...) do { printk(KERN_ERR fmt); } while(0) -#else -#define DBG(fmt...) 
do { } while(0) -#endif - -#ifdef CONFIG_PCI -extern int uli_exclude_device(struct pci_controller *hose, - u_char bus, u_char devfn); - -static int mpc86xx_exclude_device(struct pci_controller *hose, - u_char bus, u_char devfn) -{ - if (hose->dn == fsl_pci_primary) - return uli_exclude_device(hose, bus, devfn); - - return PCIBIOS_SUCCESSFUL; -} -#endif /* CONFIG_PCI */ - - -static void __init -mpc86xx_hpcn_setup_arch(void) -{ - if (ppc_md.progress) - ppc_md.progress("mpc86xx_hpcn_setup_arch()", 0); - -#ifdef CONFIG_PCI - ppc_md.pci_exclude_device = mpc86xx_exclude_device; -#endif - - printk("MPC86xx HPCN board from Freescale Semiconductor\n"); - -#ifdef CONFIG_SMP - mpc86xx_smp_init(); -#endif - - fsl_pci_assign_primary(); - - swiotlb_detect_4g(); -} - - -static void -mpc86xx_hpcn_show_cpuinfo(struct seq_file *m) -{ - uint svid = mfspr(SPRN_SVR); - - seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n"); - - seq_printf(m, "SVR\t\t: 0x%x\n", svid); -} - - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init mpc86xx_hpcn_probe(void) -{ - if (of_machine_is_compatible("fsl,mpc8641hpcn")) - return 1; /* Looks good */ - - return 0; -} - -static const struct of_device_id of_bus_ids[] __initconst = { - { .compatible = "fsl,srio", }, - {}, -}; - -static int __init declare_of_platform_devices(void) -{ - mpc86xx_common_publish_devices(); - of_platform_bus_probe(NULL, of_bus_ids, NULL); - - return 0; -} -machine_arch_initcall(mpc86xx_hpcn, declare_of_platform_devices); - -define_machine(mpc86xx_hpcn) { - .name = "MPC86xx HPCN", - .probe = mpc86xx_hpcn_probe, - .setup_arch = mpc86xx_hpcn_setup_arch, - .init_IRQ = mpc86xx_init_irq, - .show_cpuinfo = mpc86xx_hpcn_show_cpuinfo, - .get_irq = mpic_get_irq, - .time_init = mpc86xx_time_init, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = fsl_pcibios_fixup_bus, -#endif -}; diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c index 8a7e55acf090..9be33e41af6d 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c @@ -12,7 +12,7 @@ #include <linux/delay.h> #include <linux/pgtable.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/page.h> #include <asm/pci-bridge.h> #include <asm/mpic.h> diff --git a/arch/powerpc/platforms/86xx/mvme7100.c b/arch/powerpc/platforms/86xx/mvme7100.c index b2cc32a32d0b..cee49ecd32d2 100644 --- a/arch/powerpc/platforms/86xx/mvme7100.c +++ b/arch/powerpc/platforms/86xx/mvme7100.c @@ -20,7 +20,6 @@ #include <linux/pci.h> #include <linux/of.h> #include <linux/of_fdt.h> -#include <linux/of_platform.h> #include <linux/of_address.h> #include <asm/udbg.h> #include <asm/mpic.h> @@ -108,7 +107,6 @@ define_machine(mvme7100) { .init_IRQ = mpc86xx_init_irq, .get_irq = mpic_get_irq, .time_init = mpc86xx_time_init, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c index 2c32c3488afb..9ca36de23532 100644 --- a/arch/powerpc/platforms/86xx/pic.c +++ b/arch/powerpc/platforms/86xx/pic.c @@ -6,12 +6,14 @@ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/interrupt.h> +#include <linux/of.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> #include <asm/mpic.h> #include <asm/i8259.h> +#include "mpc86xx.h" + #ifdef CONFIG_PPC_I8259 static void 
mpc86xx_8259_cascade(struct irq_desc *desc) { diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index 60cc5b537a98..8623aebfac48 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -101,6 +101,7 @@ comment "Generic MPC8xx Options" config 8xx_GPIO bool "GPIO API Support" select GPIOLIB + select OF_GPIO_MM_GPIOCHIP help Saying Y here will cause the ports on an MPC8xx processor to be used with the GPIO API. If you say N here, the kernel needs less memory. @@ -194,6 +195,13 @@ config PIN_TLB_IMMR CONFIG_PIN_TLB_DATA is also selected, it will reduce CONFIG_PIN_TLB_DATA to 24 Mbytes. +config PIN_TLB_TEXT + bool "Pinned TLB for TEXT" + depends on PIN_TLB + default y + help + This pins kernel text with 8M pages. + endmenu endmenu diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c index 10e6e4fe77fc..d02f8dd66427 100644 --- a/arch/powerpc/platforms/8xx/adder875.c +++ b/arch/powerpc/platforms/8xx/adder875.c @@ -7,13 +7,12 @@ */ #include <linux/init.h> -#include <linux/fs_enet_pd.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/cpm1.h> -#include <asm/fs_pd.h> +#include <asm/8xx_immap.h> #include <asm/udbg.h> #include "mpc8xx.h" @@ -83,11 +82,6 @@ static void __init adder875_setup(void) init_ioports(); } -static int __init adder875_probe(void) -{ - return of_machine_is_compatible("analogue-and-micro,adder875"); -} - static const struct of_device_id of_bus_ids[] __initconst = { { .compatible = "simple-bus", }, {}, @@ -102,11 +96,10 @@ machine_device_initcall(adder875, declare_of_platform_devices); define_machine(adder875) { .name = "Adder MPC875", - .probe = adder875_probe, + .compatible = "analogue-and-micro,adder875", .setup_arch = adder875_setup, .init_IRQ = mpc8xx_pic_init, .get_irq = mpc8xx_get_irq, .restart = mpc8xx_restart, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/8xx/cpm1-ic.c b/arch/powerpc/platforms/8xx/cpm1-ic.c index a18fc7c99f83..a49d4a9ab3bc 100644 --- a/arch/powerpc/platforms/8xx/cpm1-ic.c +++ b/arch/powerpc/platforms/8xx/cpm1-ic.c @@ -59,7 +59,7 @@ static int cpm_get_irq(struct irq_desc *desc) cpm_vec = in_be16(&data->reg->cpic_civr); cpm_vec >>= 11; - return irq_linear_revmap(data->host, cpm_vec); + return irq_find_mapping(data->host, cpm_vec); } static void cpm_cascade(struct irq_desc *desc) @@ -110,7 +110,8 @@ static int cpm_pic_probe(struct platform_device *pdev) out_be32(&data->reg->cpic_cimr, 0); - data->host = irq_domain_add_linear(dev->of_node, 64, &cpm_pic_host_ops, data); + data->host = irq_domain_create_linear(of_fwnode_handle(dev->of_node), + 64, &cpm_pic_host_ops, data); if (!data->host) return -ENODEV; diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c index bb38c8d8f8de..7462c221115c 100644 --- a/arch/powerpc/platforms/8xx/cpm1.c +++ b/arch/powerpc/platforms/8xx/cpm1.c @@ -40,11 +40,12 @@ #include <asm/io.h> #include <asm/rheap.h> #include <asm/cpm.h> +#include <asm/fixmap.h> -#include <asm/fs_pd.h> +#include <sysdev/fsl_soc.h> #ifdef CONFIG_8xx_GPIO -#include <linux/of_gpio.h> +#include <linux/gpio/driver.h> #endif #define CPM_MAP_SIZE (0x4000) @@ -54,8 +55,6 @@ immap_t __iomem *mpc8xx_immr = (void __iomem *)VIRT_IMMR_BASE; void __init cpm_reset(void) { - sysconf8xx_t __iomem *siu_conf; - cpmp = &mpc8xx_immr->im_cpm; #ifndef CONFIG_PPC_EARLY_DEBUG_CPM @@ -77,12 +76,10 @@ void __init cpm_reset(void) * manual 
recommends it. * Bit 25, FAM can also be set to use FEC aggressive mode (860T). */ - siu_conf = immr_map(im_siu_conf); if ((mfspr(SPRN_IMMR) & 0xffff) == 0x0900) /* MPC885 */ - out_be32(&siu_conf->sc_sdcr, 0x40); + out_be32(&mpc8xx_immr->im_siu_conf.sc_sdcr, 0x40); else - out_be32(&siu_conf->sc_sdcr, 1); - immr_unmap(siu_conf); + out_be32(&mpc8xx_immr->im_siu_conf.sc_sdcr, 1); } static DEFINE_SPINLOCK(cmd_lock); @@ -94,7 +91,7 @@ int cpm_command(u32 command, u8 opcode) int i, ret; unsigned long flags; - if (command & 0xffffff0f) + if (command & 0xffffff03) return -EINVAL; spin_lock_irqsave(&cmd_lock, flags); @@ -379,7 +376,8 @@ int __init cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode) #ifdef CONFIG_8xx_GPIO struct cpm1_gpio16_chip { - struct of_mm_gpio_chip mm_gc; + struct gpio_chip gc; + void __iomem *regs; spinlock_t lock; /* shadowed data register to clear/set bits safely */ @@ -389,19 +387,17 @@ struct cpm1_gpio16_chip { int irq[16]; }; -static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc) +static void cpm1_gpio16_save_regs(struct cpm1_gpio16_chip *cpm1_gc) { - struct cpm1_gpio16_chip *cpm1_gc = - container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; cpm1_gc->cpdata = in_be16(&iop->dat); } static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; u16 pin_mask; pin_mask = 1 << (15 - gpio); @@ -409,11 +405,9 @@ static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio) return !!(in_be16(&iop->dat) & pin_mask); } -static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask, - int value) +static void __cpm1_gpio16_set(struct cpm1_gpio16_chip *cpm1_gc, u16 pin_mask, int value) { - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; if (value) cpm1_gc->cpdata |= pin_mask; @@ -423,40 +417,39 @@ static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask, out_be16(&iop->dat, cpm1_gc->cpdata); } -static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value) +static int cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); unsigned long flags; u16 pin_mask = 1 << (15 - gpio); spin_lock_irqsave(&cpm1_gc->lock, flags); - __cpm1_gpio16_set(mm_gc, pin_mask, value); + __cpm1_gpio16_set(cpm1_gc, pin_mask, value); spin_unlock_irqrestore(&cpm1_gc->lock, flags); + + return 0; } static int cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); return cpm1_gc->irq[gpio] ? 
: -ENXIO; } static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; unsigned long flags; u16 pin_mask = 1 << (15 - gpio); spin_lock_irqsave(&cpm1_gc->lock, flags); setbits16(&iop->dir, pin_mask); - __cpm1_gpio16_set(mm_gc, pin_mask, val); + __cpm1_gpio16_set(cpm1_gc, pin_mask, val); spin_unlock_irqrestore(&cpm1_gc->lock, flags); @@ -465,9 +458,8 @@ static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) static int cpm1_gpio16_dir_in(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; unsigned long flags; u16 pin_mask = 1 << (15 - gpio); @@ -484,11 +476,10 @@ int cpm1_gpiochip_add16(struct device *dev) { struct device_node *np = dev->of_node; struct cpm1_gpio16_chip *cpm1_gc; - struct of_mm_gpio_chip *mm_gc; struct gpio_chip *gc; u16 mask; - cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL); + cpm1_gc = devm_kzalloc(dev, sizeof(*cpm1_gc), GFP_KERNEL); if (!cpm1_gc) return -ENOMEM; @@ -502,43 +493,50 @@ int cpm1_gpiochip_add16(struct device *dev) cpm1_gc->irq[i] = irq_of_parse_and_map(np, j++); } - mm_gc = &cpm1_gc->mm_gc; - gc = &mm_gc->gc; - - mm_gc->save_regs = cpm1_gpio16_save_regs; + gc = &cpm1_gc->gc; + gc->base = -1; gc->ngpio = 16; gc->direction_input = cpm1_gpio16_dir_in; gc->direction_output = cpm1_gpio16_dir_out; gc->get = cpm1_gpio16_get; - gc->set = cpm1_gpio16_set; + gc->set_rv = cpm1_gpio16_set; gc->to_irq = cpm1_gpio16_to_irq; gc->parent = dev; gc->owner = THIS_MODULE; - return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc); + gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np); + if (!gc->label) + return -ENOMEM; + + cpm1_gc->regs = devm_of_iomap(dev, np, 0, NULL); + if (IS_ERR(cpm1_gc->regs)) + return PTR_ERR(cpm1_gc->regs); + + cpm1_gpio16_save_regs(cpm1_gc); + + return devm_gpiochip_add_data(dev, gc, cpm1_gc); } struct cpm1_gpio32_chip { - struct of_mm_gpio_chip mm_gc; + struct gpio_chip gc; + void __iomem *regs; spinlock_t lock; /* shadowed data register to clear/set bits safely */ u32 cpdata; }; -static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc) +static void cpm1_gpio32_save_regs(struct cpm1_gpio32_chip *cpm1_gc) { - struct cpm1_gpio32_chip *cpm1_gc = - container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; cpm1_gc->cpdata = in_be32(&iop->dat); } static int cpm1_gpio32_get(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; u32 pin_mask; pin_mask = 1 << (31 - gpio); @@ -546,11 +544,9 @@ static int cpm1_gpio32_get(struct gpio_chip *gc, unsigned int gpio) return !!(in_be32(&iop->dat) & pin_mask); } -static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, - int value) +static void __cpm1_gpio32_set(struct cpm1_gpio32_chip 
*cpm1_gc, u32 pin_mask, int value) { - struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; if (value) cpm1_gc->cpdata |= pin_mask; @@ -560,32 +556,32 @@ static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, out_be32(&iop->dat, cpm1_gc->cpdata); } -static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) +static int cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); + struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); unsigned long flags; u32 pin_mask = 1 << (31 - gpio); spin_lock_irqsave(&cpm1_gc->lock, flags); - __cpm1_gpio32_set(mm_gc, pin_mask, value); + __cpm1_gpio32_set(cpm1_gc, pin_mask, value); spin_unlock_irqrestore(&cpm1_gc->lock, flags); + + return 0; } static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; unsigned long flags; u32 pin_mask = 1 << (31 - gpio); spin_lock_irqsave(&cpm1_gc->lock, flags); setbits32(&iop->dir, pin_mask); - __cpm1_gpio32_set(mm_gc, pin_mask, val); + __cpm1_gpio32_set(cpm1_gc, pin_mask, val); spin_unlock_irqrestore(&cpm1_gc->lock, flags); @@ -594,9 +590,8 @@ static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) static int cpm1_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; unsigned long flags; u32 pin_mask = 1 << (31 - gpio); @@ -613,28 +608,35 @@ int cpm1_gpiochip_add32(struct device *dev) { struct device_node *np = dev->of_node; struct cpm1_gpio32_chip *cpm1_gc; - struct of_mm_gpio_chip *mm_gc; struct gpio_chip *gc; - cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL); + cpm1_gc = devm_kzalloc(dev, sizeof(*cpm1_gc), GFP_KERNEL); if (!cpm1_gc) return -ENOMEM; spin_lock_init(&cpm1_gc->lock); - mm_gc = &cpm1_gc->mm_gc; - gc = &mm_gc->gc; - - mm_gc->save_regs = cpm1_gpio32_save_regs; + gc = &cpm1_gc->gc; + gc->base = -1; gc->ngpio = 32; gc->direction_input = cpm1_gpio32_dir_in; gc->direction_output = cpm1_gpio32_dir_out; gc->get = cpm1_gpio32_get; - gc->set = cpm1_gpio32_set; + gc->set_rv = cpm1_gpio32_set; gc->parent = dev; gc->owner = THIS_MODULE; - return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc); + gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np); + if (!gc->label) + return -ENOMEM; + + cpm1_gc->regs = devm_of_iomap(dev, np, 0, NULL); + if (IS_ERR(cpm1_gc->regs)) + return PTR_ERR(cpm1_gc->regs); + + cpm1_gpio32_save_regs(cpm1_gc); + + return devm_gpiochip_add_data(dev, gc, cpm1_gc); } #endif /* CONFIG_8xx_GPIO */ diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c index b3b22520b435..fc276a29d67f 100644 --- a/arch/powerpc/platforms/8xx/ep88xc.c +++ b/arch/powerpc/platforms/8xx/ep88xc.c @@ -142,11 +142,6 @@ static void __init ep88xc_setup_arch(void) BCSR8_PHY2_ENABLE | 
BCSR8_PHY2_POWER); } -static int __init ep88xc_probe(void) -{ - return of_machine_is_compatible("fsl,ep88xc"); -} - static const struct of_device_id of_bus_ids[] __initconst = { { .name = "soc", }, { .name = "cpm", }, @@ -165,7 +160,7 @@ machine_device_initcall(ep88xc, declare_of_platform_devices); define_machine(ep88xc) { .name = "Embedded Planet EP88xC", - .probe = ep88xc_probe, + .compatible = "fsl,ep88xc", .setup_arch = ep88xc_setup_arch, .init_IRQ = mpc8xx_pic_init, .get_irq = mpc8xx_get_irq, diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index 24f358f86d16..2336b687bc96 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -22,7 +22,6 @@ #include <asm/io.h> #include <asm/8xx_immap.h> -#include <asm/fs_pd.h> #include <mm/mmu_decl.h> #include "pic.h" @@ -37,20 +36,6 @@ static irqreturn_t timebase_interrupt(int irq, void *dev) return IRQ_HANDLED; } -/* per-board overridable init_internal_rtc() function. */ -void __init __attribute__ ((weak)) -init_internal_rtc(void) -{ - sit8xx_t __iomem *sys_tmr = immr_map(im_sit); - - /* Disable the RTC one second and alarm interrupts. */ - clrbits16(&sys_tmr->sit_rtcsc, (RTCSC_SIE | RTCSC_ALE)); - - /* Enable the RTC */ - setbits16(&sys_tmr->sit_rtcsc, (RTCSC_RTF | RTCSC_RTE)); - immr_unmap(sys_tmr); -} - static int __init get_freq(char *name, unsigned long *val) { struct device_node *cpu; @@ -80,23 +65,14 @@ static int __init get_freq(char *name, unsigned long *val) void __init mpc8xx_calibrate_decr(void) { struct device_node *cpu; - cark8xx_t __iomem *clk_r1; - car8xx_t __iomem *clk_r2; - sitk8xx_t __iomem *sys_tmr1; - sit8xx_t __iomem *sys_tmr2; int irq, virq; - clk_r1 = immr_map(im_clkrstk); - /* Unlock the SCCR. */ - out_be32(&clk_r1->cark_sccrk, ~KAPWR_KEY); - out_be32(&clk_r1->cark_sccrk, KAPWR_KEY); - immr_unmap(clk_r1); + out_be32(&mpc8xx_immr->im_clkrstk.cark_sccrk, ~KAPWR_KEY); + out_be32(&mpc8xx_immr->im_clkrstk.cark_sccrk, KAPWR_KEY); /* Force all 8xx processors to use divide by 16 processor clock. */ - clk_r2 = immr_map(im_clkrst); - setbits32(&clk_r2->car_sccr, 0x02000000); - immr_unmap(clk_r2); + setbits32(&mpc8xx_immr->im_clkrst.car_sccr, 0x02000000); /* Processor frequency is MHz. */ @@ -123,16 +99,18 @@ void __init mpc8xx_calibrate_decr(void) * we guarantee the registers are locked, then we unlock them * for our use. */ - sys_tmr1 = immr_map(im_sitk); - out_be32(&sys_tmr1->sitk_tbscrk, ~KAPWR_KEY); - out_be32(&sys_tmr1->sitk_rtcsck, ~KAPWR_KEY); - out_be32(&sys_tmr1->sitk_tbk, ~KAPWR_KEY); - out_be32(&sys_tmr1->sitk_tbscrk, KAPWR_KEY); - out_be32(&sys_tmr1->sitk_rtcsck, KAPWR_KEY); - out_be32(&sys_tmr1->sitk_tbk, KAPWR_KEY); - immr_unmap(sys_tmr1); + out_be32(&mpc8xx_immr->im_sitk.sitk_tbscrk, ~KAPWR_KEY); + out_be32(&mpc8xx_immr->im_sitk.sitk_rtcsck, ~KAPWR_KEY); + out_be32(&mpc8xx_immr->im_sitk.sitk_tbk, ~KAPWR_KEY); + out_be32(&mpc8xx_immr->im_sitk.sitk_tbscrk, KAPWR_KEY); + out_be32(&mpc8xx_immr->im_sitk.sitk_rtcsck, KAPWR_KEY); + out_be32(&mpc8xx_immr->im_sitk.sitk_tbk, KAPWR_KEY); + + /* Disable the RTC one second and alarm interrupts. 
*/ + clrbits16(&mpc8xx_immr->im_sit.sit_rtcsc, (RTCSC_SIE | RTCSC_ALE)); - init_internal_rtc(); + /* Enable the RTC */ + setbits16(&mpc8xx_immr->im_sit.sit_rtcsc, (RTCSC_RTF | RTCSC_RTE)); /* Enabling the decrementer also enables the timebase interrupts * (or from the other point of view, to get decrementer interrupts @@ -144,10 +122,8 @@ void __init mpc8xx_calibrate_decr(void) of_node_put(cpu); irq = virq_to_hw(virq); - sys_tmr2 = immr_map(im_sit); - out_be16(&sys_tmr2->sit_tbscr, ((1 << (7 - (irq/2))) << 8) | - (TBSCR_TBF | TBSCR_TBE)); - immr_unmap(sys_tmr2); + out_be16(&mpc8xx_immr->im_sit.sit_tbscr, + ((1 << (7 - (irq / 2))) << 8) | (TBSCR_TBF | TBSCR_TBE)); if (request_irq(virq, timebase_interrupt, IRQF_NO_THREAD, "tbint", NULL)) @@ -161,47 +137,36 @@ void __init mpc8xx_calibrate_decr(void) int mpc8xx_set_rtc_time(struct rtc_time *tm) { - sitk8xx_t __iomem *sys_tmr1; - sit8xx_t __iomem *sys_tmr2; time64_t time; - sys_tmr1 = immr_map(im_sitk); - sys_tmr2 = immr_map(im_sit); time = rtc_tm_to_time64(tm); - out_be32(&sys_tmr1->sitk_rtck, KAPWR_KEY); - out_be32(&sys_tmr2->sit_rtc, (u32)time); - out_be32(&sys_tmr1->sitk_rtck, ~KAPWR_KEY); + out_be32(&mpc8xx_immr->im_sitk.sitk_rtck, KAPWR_KEY); + out_be32(&mpc8xx_immr->im_sit.sit_rtc, (u32)time); + out_be32(&mpc8xx_immr->im_sitk.sitk_rtck, ~KAPWR_KEY); - immr_unmap(sys_tmr2); - immr_unmap(sys_tmr1); return 0; } void mpc8xx_get_rtc_time(struct rtc_time *tm) { unsigned long data; - sit8xx_t __iomem *sys_tmr = immr_map(im_sit); /* Get time from the RTC. */ - data = in_be32(&sys_tmr->sit_rtc); + data = in_be32(&mpc8xx_immr->im_sit.sit_rtc); rtc_time64_to_tm(data, tm); - immr_unmap(sys_tmr); return; } void __noreturn mpc8xx_restart(char *cmd) { - car8xx_t __iomem *clk_r = immr_map(im_clkrst); - - local_irq_disable(); - setbits32(&clk_r->car_plprcr, 0x00000080); + setbits32(&mpc8xx_immr->im_clkrst.car_plprcr, 0x00000080); /* Clear the ME bit in MSR to cause checkstop on machine check */ mtmsr(mfmsr() & ~0x1000); - in_8(&clk_r->res[0]); + in_8(&mpc8xx_immr->im_clkrst.res[0]); panic("Restart failed\n"); } diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c index 03267e4a44a9..e4192c0a3c0c 100644 --- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c @@ -24,7 +24,6 @@ #include <asm/time.h> #include <asm/8xx_immap.h> #include <asm/cpm1.h> -#include <asm/fs_pd.h> #include <asm/udbg.h> #include "mpc86xads.h" @@ -117,11 +116,6 @@ static void __init mpc86xads_setup_arch(void) iounmap(bcsr_io); } -static int __init mpc86xads_probe(void) -{ - return of_machine_is_compatible("fsl,mpc866ads"); -} - static const struct of_device_id of_bus_ids[] __initconst = { { .name = "soc", }, { .name = "cpm", }, @@ -139,7 +133,7 @@ machine_device_initcall(mpc86x_ads, declare_of_platform_devices); define_machine(mpc86x_ads) { .name = "MPC86x ADS", - .probe = mpc86xads_probe, + .compatible = "fsl,mpc866ads", .setup_arch = mpc86xads_setup_arch, .init_IRQ = mpc8xx_pic_init, .get_irq = mpc8xx_get_irq, diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c index b1e39f96de00..2d899be746eb 100644 --- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c @@ -21,8 +21,6 @@ #include <linux/device.h> #include <linux/delay.h> -#include <linux/fs_enet_pd.h> -#include <linux/fs_uart_pd.h> #include <linux/fsl_devices.h> #include <linux/mii.h> #include <linux/of_address.h> @@ -37,7 +35,6 @@ 
#include <asm/time.h> #include <asm/8xx_immap.h> #include <asm/cpm1.h> -#include <asm/fs_pd.h> #include <asm/udbg.h> #include "mpc885ads.h" @@ -192,11 +189,6 @@ static void __init mpc885ads_setup_arch(void) } } -static int __init mpc885ads_probe(void) -{ - return of_machine_is_compatible("fsl,mpc885ads"); -} - static const struct of_device_id of_bus_ids[] __initconst = { { .name = "soc", }, { .name = "cpm", }, @@ -215,7 +207,7 @@ machine_device_initcall(mpc885_ads, declare_of_platform_devices); define_machine(mpc885_ads) { .name = "Freescale MPC885 ADS", - .probe = mpc885ads_probe, + .compatible = "fsl,mpc885ads", .setup_arch = mpc885ads_setup_arch, .init_IRQ = mpc8xx_pic_init, .get_irq = mpc8xx_get_irq, diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c index ea6b0e523c60..933d6ab7f512 100644 --- a/arch/powerpc/platforms/8xx/pic.c +++ b/arch/powerpc/platforms/8xx/pic.c @@ -80,7 +80,7 @@ unsigned int mpc8xx_get_irq(void) if (irq == PIC_VEC_SPURRIOUS) return 0; - return irq_linear_revmap(mpc8xx_pic_host, irq); + return irq_find_mapping(mpc8xx_pic_host, irq); } @@ -146,7 +146,8 @@ void __init mpc8xx_pic_init(void) if (!siu_reg) goto out; - mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL); + mpc8xx_pic_host = irq_domain_create_linear(of_fwnode_handle(np), 64, + &mpc8xx_pic_host_ops, NULL); if (!mpc8xx_pic_host) printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c index 3725d51248df..d97a7910c594 100644 --- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c +++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c @@ -24,8 +24,6 @@ #include <linux/device.h> #include <linux/delay.h> -#include <linux/fs_enet_pd.h> -#include <linux/fs_uart_pd.h> #include <linux/fsl_devices.h> #include <linux/mii.h> #include <linux/of_fdt.h> @@ -39,7 +37,6 @@ #include <asm/time.h> #include <asm/8xx_immap.h> #include <asm/cpm1.h> -#include <asm/fs_pd.h> #include <asm/udbg.h> #include "mpc8xx.h" @@ -105,6 +102,9 @@ static void __init init_ioports(void) if (dnode == NULL) return; prop = of_find_property(dnode, "ethernet1", &len); + + of_node_put(dnode); + if (prop == NULL) return; @@ -118,11 +118,6 @@ static void __init tqm8xx_setup_arch(void) init_ioports(); } -static int __init tqm8xx_probe(void) -{ - return of_machine_is_compatible("tqc,tqm8xx"); -} - static const struct of_device_id of_bus_ids[] __initconst = { { .name = "soc", }, { .name = "cpm", }, @@ -141,7 +136,7 @@ machine_device_initcall(tqm8xx, declare_of_platform_devices); define_machine(tqm8xx) { .name = "TQM8xx", - .probe = tqm8xx_probe, + .compatible = "tqc,tqm8xx", .setup_arch = tqm8xx_setup_arch, .init_IRQ = mpc8xx_pic_init, .get_irq = mpc8xx_get_irq, diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index d41dad227de8..fea3766eac0f 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -7,7 +7,6 @@ source "arch/powerpc/platforms/chrp/Kconfig" source "arch/powerpc/platforms/512x/Kconfig" source "arch/powerpc/platforms/52xx/Kconfig" source "arch/powerpc/platforms/powermac/Kconfig" -source "arch/powerpc/platforms/maple/Kconfig" source "arch/powerpc/platforms/pasemi/Kconfig" source "arch/powerpc/platforms/ps3/Kconfig" source "arch/powerpc/platforms/cell/Kconfig" @@ -18,7 +17,6 @@ source "arch/powerpc/platforms/85xx/Kconfig" source "arch/powerpc/platforms/86xx/Kconfig" source "arch/powerpc/platforms/embedded6xx/Kconfig" source 
"arch/powerpc/platforms/44x/Kconfig" -source "arch/powerpc/platforms/40x/Kconfig" source "arch/powerpc/platforms/amigaone/Kconfig" source "arch/powerpc/platforms/book3s/Kconfig" source "arch/powerpc/platforms/microwatt/Kconfig" @@ -72,10 +70,6 @@ config PPC_DT_CPU_FTRS firmware provides this binding. If you're not sure say Y. -config UDBG_RTAS_CONSOLE - bool "RTAS based debug console" - depends on PPC_RTAS - config PPC_SMP_MUXED_IPI bool help @@ -188,12 +182,6 @@ config PPC_INDIRECT_PIO bool select GENERIC_IOMAP -config PPC_INDIRECT_MMIO - bool - -config PPC_IO_WORKAROUNDS - bool - source "drivers/cpufreq/Kconfig" menu "CPUIdle driver" @@ -244,16 +232,18 @@ config QE_GPIO bool "QE GPIO support" depends on QUICC_ENGINE select GPIOLIB + select OF_GPIO_MM_GPIOCHIP help Say Y here if you're going to use hardware that connects to the QE GPIOs. config CPM2 bool "Enable support for the CPM2 (Communications Processor Module)" - depends on (FSL_SOC_BOOKE && PPC32) || 8260 + depends on (FSL_SOC_BOOKE && PPC32) || PPC_82xx select CPM select HAVE_PCI select GPIOLIB + select OF_GPIO_MM_GPIOCHIP help The CPM2 (Communications Processor Module) is a coprocessor on embedded CPUs made by Freescale. Selecting this option means that @@ -261,7 +251,10 @@ config CPM2 on it (826x, 827x, 8560). config FSL_ULI1575 - bool + bool "ULI1575 PCIe south bridge support" + depends on FSL_SOC_BOOKE || PPC_86xx + depends on PCI + select FSL_PCI select GENERIC_ISA_DMA help Supports for the ULI1575 PCIe south bridge that exists on some diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 9e2df4b66478..613b383ed8b3 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -2,7 +2,6 @@ config PPC32 bool default y if !PPC64 - select KASAN_VMALLOC if KASAN && MODULES config PPC64 bool "64-bit kernel" @@ -34,7 +33,7 @@ config PPC_BOOK3S_32 config PPC_85xx bool "Freescale 85xx" - select E500 + select PPC_E500 config PPC_8xx bool "Freescale 8xx" @@ -44,19 +43,10 @@ config PPC_8xx select HAVE_ARCH_VMAP_STACK select HUGETLBFS -config 40x - bool "AMCC 40x" - select PPC_DCR_NATIVE - select PPC_UDBG_16550 - select 4xx_SOC - select HAVE_PCI - select PPC_KUEP if PPC_KUAP - config 44x bool "AMCC 44x, 46x or 47x" select PPC_DCR_NATIVE select PPC_UDBG_16550 - select 4xx_SOC select HAVE_PCI select PHYS_64BIT select PPC_KUEP @@ -94,10 +84,8 @@ config PPC_BOOK3S_64 bool "Server processors" select PPC_FPU select PPC_HAVE_PMU_SUPPORT - select HAVE_ARCH_TRANSPARENT_HUGEPAGE select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION select ARCH_ENABLE_SPLIT_PMD_PTLOCK - select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE select ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_NUMA_BALANCING select HAVE_MOVE_PMD @@ -108,7 +96,8 @@ config PPC_BOOK3S_64 config PPC_BOOK3E_64 bool "Embedded processors" - select PPC_FSL_BOOK3E + select PPC_E500 + select PPC_E500MC select PPC_FPU # Make it a choice ? select PPC_SMP_MUXED_IPI select PPC_DOORBELL @@ -116,37 +105,46 @@ config PPC_BOOK3E_64 endchoice +config PPC_THP + def_bool y + depends on PPC_BOOK3S_64 + depends on PPC_RADIX_MMU || (PPC_64S_HASH_MMU && PAGE_SIZE_64KB) + select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + choice prompt "CPU selection" - default GENERIC_CPU help This will create a kernel which is optimised for a particular CPU. The resulting kernel may not run on other CPUs, so use this with care. 
If unsure, select Generic. -config GENERIC_CPU - bool "Generic (POWER4 and above)" - depends on PPC64 && !CPU_LITTLE_ENDIAN - select PPC_64S_HASH_MMU if PPC_BOOK3S_64 +config POWERPC64_CPU + bool "Generic (POWER5 and PowerPC 970 and above)" + depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN + select PPC_64S_HASH_MMU -config GENERIC_CPU +config POWERPC64_CPU bool "Generic (POWER8 and above)" - depends on PPC64 && CPU_LITTLE_ENDIAN + depends on PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN select ARCH_HAS_FAST_MULTIPLIER select PPC_64S_HASH_MMU + select PPC_HAS_LBARX_LHARX -config GENERIC_CPU +config POWERPC_CPU bool "Generic 32 bits powerpc" - depends on PPC32 && !PPC_8xx + depends on PPC_BOOK3S_32 config CELL_CPU bool "Cell Broadband Engine" depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN + depends on !CC_IS_CLANG select PPC_64S_HASH_MMU -config POWER5_CPU - bool "POWER5" +config PPC_970_CPU + bool "PowerPC 970 (including PowerPC G5)" depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN select PPC_64S_HASH_MMU @@ -160,81 +158,138 @@ config POWER7_CPU depends on PPC_BOOK3S_64 select ARCH_HAS_FAST_MULTIPLIER select PPC_64S_HASH_MMU + select PPC_HAS_LBARX_LHARX config POWER8_CPU bool "POWER8" depends on PPC_BOOK3S_64 select ARCH_HAS_FAST_MULTIPLIER select PPC_64S_HASH_MMU + select PPC_HAS_LBARX_LHARX config POWER9_CPU bool "POWER9" depends on PPC_BOOK3S_64 select ARCH_HAS_FAST_MULTIPLIER + select PPC_HAS_LBARX_LHARX + +config POWER10_CPU + bool "POWER10" + depends on PPC_BOOK3S_64 + select ARCH_HAS_FAST_MULTIPLIER + select PPC_HAVE_PREFIXED_SUPPORT + select PPC_HAVE_PCREL_SUPPORT config E5500_CPU bool "Freescale e5500" - depends on E500 + depends on PPC64 && PPC_E500 config E6500_CPU bool "Freescale e6500" - depends on E500 + depends on PPC64 && PPC_E500 + depends on !CC_IS_CLANG + select PPC_HAS_LBARX_LHARX + +config 440_CPU + bool "440 (44x family)" + depends on 44x + +config 464_CPU + bool "464 (44x family)" + depends on 44x + depends on !CC_IS_CLANG + +config 476_CPU + bool "476 (47x family)" + depends on PPC_47x + depends on !CC_IS_CLANG config 860_CPU bool "8xx family" depends on PPC_8xx + depends on !CC_IS_CLANG config E300C2_CPU bool "e300c2 (832x)" depends on PPC_BOOK3S_32 + depends on !CC_IS_CLANG config E300C3_CPU bool "e300c3 (831x)" depends on PPC_BOOK3S_32 + depends on !CC_IS_CLANG config G4_CPU bool "G4 (74xx)" depends on PPC_BOOK3S_32 select ALTIVEC +config E500_CPU + bool "e500 (8540)" + depends on PPC_85xx && !PPC_E500MC + +config E500MC_CPU + bool "e500mc" + depends on PPC_85xx && PPC_E500MC + +config TOOLCHAIN_DEFAULT_CPU + bool "Rely on the toolchain's implicit default CPU" + endchoice config TARGET_CPU_BOOL bool - default !GENERIC_CPU + default !TOOLCHAIN_DEFAULT_CPU config TARGET_CPU string depends on TARGET_CPU_BOOL default "cell" if CELL_CPU - default "power5" if POWER5_CPU + default "970" if PPC_970_CPU default "power6" if POWER6_CPU default "power7" if POWER7_CPU default "power8" if POWER8_CPU default "power9" if POWER9_CPU + default "power10" if POWER10_CPU + default "e5500" if E5500_CPU + default "e6500" if E6500_CPU + default "power4" if POWERPC64_CPU && !CPU_LITTLE_ENDIAN + default "power8" if POWERPC64_CPU && CPU_LITTLE_ENDIAN + default "440" if 440_CPU + default "464" if 464_CPU + default "476" if 476_CPU default "860" if 860_CPU default "e300c2" if E300C2_CPU default "e300c3" if E300C3_CPU default "G4" if G4_CPU + default "8540" if E500_CPU + default "e500mc" if E500MC_CPU + default "powerpc" if POWERPC_CPU + +config TUNE_CPU + string + depends on POWERPC64_CPU + default 
"-mtune=power10" if $(cc-option,-mtune=power10) + default "-mtune=power9" if $(cc-option,-mtune=power9) + default "-mtune=power8" if $(cc-option,-mtune=power8) config PPC_BOOK3S def_bool y depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 -config PPC_BOOK3E - def_bool y - depends on PPC_BOOK3E_64 - -config E500 +config PPC_E500 select FSL_EMB_PERFMON - select PPC_FSL_BOOK3E bool + select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 + select PPC_SMP_MUXED_IPI + select PPC_DOORBELL + select PPC_KUEP config PPC_E500MC bool "e500mc Support" select PPC_FPU select COMMON_CLK - depends on E500 + depends on PPC_E500 help This must be enabled for running on e500mc (and derivatives such as e5500/e6500), and must be disabled for running on @@ -257,7 +312,7 @@ config PPC_FPU config FSL_EMB_PERFMON bool "Freescale Embedded Perfmon" - depends on E500 || PPC_83xx + depends on PPC_E500 || PPC_83xx help This is the Performance Monitor support found on the e500 core and some e300 cores (c3 and c4). Select this only if your @@ -270,47 +325,27 @@ config FSL_EMB_PERF_EVENT config FSL_EMB_PERF_EVENT_E500 bool - depends on FSL_EMB_PERF_EVENT && E500 + depends on FSL_EMB_PERF_EVENT && PPC_E500 default y config 4xx bool - depends on 40x || 44x + depends on 44x default y config BOOKE bool - depends on E500 || 44x || PPC_BOOK3E - default y - -config BOOKE_OR_40x - bool - depends on BOOKE || 40x + depends on PPC_E500 || 44x default y -config FSL_BOOKE - bool - depends on E500 && PPC32 - default y - -# this is for common code between PPC32 & PPC64 FSL BOOKE -config PPC_FSL_BOOK3E - bool - select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 - imply FSL_EMB_PERFMON - select PPC_SMP_MUXED_IPI - select PPC_DOORBELL - select PPC_KUEP - default y if FSL_BOOKE - config PTE_64BIT bool - depends on 44x || E500 || PPC_86xx + depends on 44x || PPC_E500 || PPC_86xx default y if PHYS_64BIT config PHYS_64BIT - bool 'Large physical address support' if E500 || PPC_86xx - depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx + bool 'Large physical address support' if PPC_E500 || PPC_86xx + depends on (44x || PPC_E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx select PHYS_ADDR_T_64BIT help This option enables kernel support for larger than 32-bit physical @@ -324,7 +359,7 @@ config PHYS_64BIT config ALTIVEC bool "AltiVec Support" - depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 || (PPC_E500MC && PPC64) + depends on PPC_BOOK3S || (PPC_E500MC && PPC64 && !E5500_CPU) select PPC_FPU help This option enables kernel support for the Altivec extensions to the @@ -357,7 +392,7 @@ config VSX config SPE_POSSIBLE def_bool y - depends on E500 && !PPC_E500MC + depends on PPC_E500 && !PPC_E500MC config SPE bool "SPE Support" @@ -414,9 +449,52 @@ config PPC_RADIX_MMU_DEFAULT If you're unsure, say Y. +config PPC_RADIX_BROADCAST_TLBIE + bool + depends on PPC_RADIX_MMU + help + Power ISA v3.0 and later implementations in the Linux Compliancy Subset + and lower are not required to implement broadcast TLBIE instructions. + Platforms with CPUs that do implement TLBIE broadcast, that is, where + a TLB invalidation instruction performed on one CPU operates on the + TLBs of all CPUs in the system, should select this option. If this + option is selected, the disable_tlbie kernel command line option can + be used to cause global TLB invalidations to be done via IPIs; without + it, IPIs will be used unconditionally. 
+ +config PPC_KERNEL_PREFIXED + depends on PPC_HAVE_PREFIXED_SUPPORT + depends on CC_HAS_PREFIXED + default n + bool "Build Kernel with Prefixed Instructions" + help + POWER10 and later CPUs support prefixed instructions, 8 byte + instructions that include large immediate, pc relative addressing, + and various floating point, vector, MMA. + + This option builds the kernel with prefixed instructions, and + allows a pc relative addressing option to be selected. + + Kernel support for prefixed instructions in applications and guests + is not affected by this option. + +config PPC_KERNEL_PCREL + depends on PPC_HAVE_PCREL_SUPPORT + depends on PPC_HAVE_PREFIXED_SUPPORT + depends on CC_HAS_PCREL + default n + select PPC_KERNEL_PREFIXED + bool "Build Kernel with PC-Relative addressing model" + help + POWER10 and later CPUs support pc relative addressing. Recent + compilers have support for an ELF ABI extension for a pc relative + ABI. + + This option builds the kernel with the pc relative ABI model. + config PPC_KUEP - bool "Kernel Userspace Execution Prevention" if !40x - default y if !40x + bool "Kernel Userspace Execution Prevention" + default y help Enable support for Kernel Userspace Execution Prevention (KUEP) @@ -447,13 +525,15 @@ config PPC_MMU_NOHASH def_bool y depends on !PPC_BOOK3S -config PPC_BOOK3E_MMU - def_bool y - depends on FSL_BOOKE || PPC_BOOK3E - config PPC_HAVE_PMU_SUPPORT bool +config PPC_HAVE_PREFIXED_SUPPORT + bool + +config PPC_HAVE_PCREL_SUPPORT + bool + config PMU_SYSFS bool "Create PMU SPRs sysfs file" default n @@ -472,7 +552,7 @@ config FORCE_SMP select SMP config SMP - depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x + depends on PPC_BOOK3S || PPC_E500 || PPC_47x select GENERIC_IRQ_MIGRATION bool "Symmetric multi-processing support" if !FORCE_SMP help @@ -500,7 +580,7 @@ config NR_CPUS config NOT_COHERENT_CACHE bool - depends on 4xx || PPC_8xx || PPC_MPC512x || \ + depends on 44x || PPC_8xx || PPC_MPC512x || \ GAMECUBE_COMMON || AMIGAONE select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_SYNC_DMA_FOR_DEVICE @@ -554,10 +634,10 @@ config CPU_LITTLE_ENDIAN endchoice config PPC64_ELF_ABI_V1 - def_bool PPC64 && CPU_BIG_ENDIAN + def_bool PPC64 && (CPU_BIG_ENDIAN && !PPC64_BIG_ENDIAN_ELF_ABI_V2) config PPC64_ELF_ABI_V2 - def_bool PPC64 && CPU_LITTLE_ENDIAN + def_bool PPC64 && !PPC64_ELF_ABI_V1 config PPC64_BOOT_WRAPPER def_bool n diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile index 94470fb27c99..3cee4a842736 100644 --- a/arch/powerpc/platforms/Makefile +++ b/arch/powerpc/platforms/Makefile @@ -4,8 +4,6 @@ obj-$(CONFIG_FSL_ULI1575) += fsl_uli1575.o obj-$(CONFIG_PPC_PMAC) += powermac/ obj-$(CONFIG_PPC_CHRP) += chrp/ -obj-$(CONFIG_4xx) += 4xx/ -obj-$(CONFIG_40x) += 40x/ obj-$(CONFIG_44x) += 44x/ obj-$(CONFIG_PPC_MPC512x) += 512x/ obj-$(CONFIG_PPC_MPC52xx) += 52xx/ @@ -16,7 +14,6 @@ obj-$(CONFIG_FSL_SOC_BOOKE) += 85xx/ obj-$(CONFIG_PPC_86xx) += 86xx/ obj-$(CONFIG_PPC_POWERNV) += powernv/ obj-$(CONFIG_PPC_PSERIES) += pseries/ -obj-$(CONFIG_PPC_MAPLE) += maple/ obj-$(CONFIG_PPC_PASEMI) += pasemi/ obj-$(CONFIG_PPC_CELL) += cell/ obj-$(CONFIG_PPC_PS3) += ps3/ diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c index 397ce6a40bd0..33f852a7625f 100644 --- a/arch/powerpc/platforms/amigaone/setup.c +++ b/arch/powerpc/platforms/amigaone/setup.c @@ -25,7 +25,7 @@ extern void __flush_disable_L1(void); -void amigaone_show_cpuinfo(struct seq_file *m) +static void amigaone_show_cpuinfo(struct seq_file *m) { 
seq_printf(m, "vendor\t\t: Eyetech Ltd.\n"); } @@ -65,7 +65,7 @@ static int __init amigaone_add_bridge(struct device_node *dev) return 0; } -void __init amigaone_setup_arch(void) +static void __init amigaone_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0); @@ -83,7 +83,7 @@ static void __init amigaone_discover_phbs(void) BUG_ON(phb != 0); } -void __init amigaone_init_IRQ(void) +static void __init amigaone_init_IRQ(void) { struct device_node *pic, *np = NULL; const unsigned long *prop = NULL; @@ -109,7 +109,7 @@ void __init amigaone_init_IRQ(void) i8259_init(pic, int_ack); ppc_md.get_irq = i8259_irq; - irq_set_default_host(i8259_get_host()); + irq_set_default_domain(i8259_get_host()); } static int __init request_isa_regions(void) @@ -123,7 +123,7 @@ static int __init request_isa_regions(void) } machine_device_initcall(amigaone, request_isa_regions); -void __noreturn amigaone_restart(char *cmd) +static void __noreturn amigaone_restart(char *cmd) { local_irq_disable(); @@ -143,30 +143,26 @@ void __noreturn amigaone_restart(char *cmd) static int __init amigaone_probe(void) { - if (of_machine_is_compatible("eyetech,amigaone")) { - /* - * Coherent memory access cause complete system lockup! Thus - * disable this CPU feature, even if the CPU needs it. - */ - cur_cpu_spec->cpu_features &= ~CPU_FTR_NEED_COHERENT; + /* + * Coherent memory access cause complete system lockup! Thus + * disable this CPU feature, even if the CPU needs it. + */ + cur_cpu_spec->cpu_features &= ~CPU_FTR_NEED_COHERENT; - DMA_MODE_READ = 0x44; - DMA_MODE_WRITE = 0x48; + DMA_MODE_READ = 0x44; + DMA_MODE_WRITE = 0x48; - return 1; - } - - return 0; + return 1; } define_machine(amigaone) { .name = "AmigaOne", + .compatible = "eyetech,amigaone", .probe = amigaone_probe, .setup_arch = amigaone_setup_arch, .discover_phbs = amigaone_discover_phbs, .show_cpuinfo = amigaone_show_cpuinfo, .init_IRQ = amigaone_init_IRQ, .restart = amigaone_restart, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c index c0799fb26b6d..dc6f75d3ac6e 100644 --- a/arch/powerpc/platforms/book3s/vas-api.c +++ b/arch/powerpc/platforms/book3s/vas-api.c @@ -4,6 +4,8 @@ * Copyright (C) 2019 Haren Myneni, IBM Corp */ +#define pr_fmt(fmt) "vas-api: " fmt + #include <linux/kernel.h> #include <linux/device.h> #include <linux/cdev.h> @@ -53,7 +55,7 @@ struct coproc_instance { struct vas_window *txwin; }; -static char *coproc_devnode(struct device *dev, umode_t *mode) +static char *coproc_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev)); } @@ -78,7 +80,7 @@ int get_vas_user_win_ref(struct vas_user_win_ref *task_ref) task_ref->mm = get_task_mm(current); if (!task_ref->mm) { put_pid(task_ref->pid); - pr_err("VAS: pid(%d): mm_struct is not found\n", + pr_err("pid(%d): mm_struct is not found\n", current->pid); return -EPERM; } @@ -235,8 +237,7 @@ void vas_update_csb(struct coprocessor_request_block *crb, rc = kill_pid_info(SIGSEGV, &info, pid); rcu_read_unlock(); - pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__, - pid_vnr(pid), rc); + pr_devel("pid %d kill_proc_info() rc %d\n", pid_vnr(pid), rc); } void vas_dump_crb(struct coprocessor_request_block *crb) @@ -294,7 +295,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg) rc = copy_from_user(&uattr, uptr, sizeof(uattr)); if (rc) { - pr_err("%s(): copy_from_user() returns %d\n", 
__func__, rc); + pr_err("copy_from_user() returns %d\n", rc); return -EFAULT; } @@ -311,7 +312,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg) txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags, cp_inst->coproc->cop_type); if (IS_ERR(txwin)) { - pr_err("%s() VAS window open failed, %ld\n", __func__, + pr_err_ratelimited("VAS window open failed rc=%ld\n", PTR_ERR(txwin)); return PTR_ERR(txwin); } @@ -405,8 +406,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf) * window is not opened. Shouldn't expect this error. */ if (!cp_inst || !cp_inst->txwin) { - pr_err("%s(): Unexpected fault on paste address with TX window closed\n", - __func__); + pr_err("Unexpected fault on paste address with TX window closed\n"); return VM_FAULT_SIGBUS; } @@ -414,15 +414,14 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf) /* * When the LPAR lost credits due to core removal or during * migration, invalidate the existing mapping for the current - * paste addresses and set windows in-active (zap_page_range in + * paste addresses and set windows in-active (zap_vma_pages in * reconfig_close_windows()). * New mapping will be done later after migration or new credits * available. So continue to receive faults if the user space * issue NX request. */ if (txwin->task_ref.vma != vmf->vma) { - pr_err("%s(): No previous mapping with paste address\n", - __func__); + pr_err("No previous mapping with paste address\n"); return VM_FAULT_SIGBUS; } @@ -431,7 +430,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf) * The window may be inactive due to lost credit (Ex: core * removal with DLPAR). If the window is active again when * the credit is available, map the new paste address at the - * the window virtual address. + * window virtual address. */ if (txwin->status == VAS_WIN_ACTIVE) { paste_addr = cp_inst->coproc->vops->paste_addr(txwin); @@ -465,7 +464,43 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } +/* + * During mmap() paste address, mapping VMA is saved in VAS window + * struct which is used to unmap during migration if the window is + * still open. But the user space can remove this mapping with + * munmap() before closing the window and the VMA address will + * be invalid. Set VAS window VMA to NULL in this function which + * is called before VMA free. + */ +static void vas_mmap_close(struct vm_area_struct *vma) +{ + struct file *fp = vma->vm_file; + struct coproc_instance *cp_inst = fp->private_data; + struct vas_window *txwin; + + /* Should not happen */ + if (!cp_inst || !cp_inst->txwin) { + pr_err("No attached VAS window for the paste address mmap\n"); + return; + } + + txwin = cp_inst->txwin; + /* + * task_ref.vma is set in coproc_mmap() during mmap paste + * address. So it has to be the same VMA that is getting freed. 
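/*
 * [Editor's note] A hedged userspace sketch of the ordering this hook
 * handles: the process maps the paste address, unmaps it, and only then
 * closes the window, so ->close() must forget the stale VMA. The device
 * path and ioctl follow the documented VAS API for the NX GZIP
 * coprocessor; the function name is made up and error handling is
 * omitted, so treat this as illustrative only.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <asm/vas-api.h>

static void exercise_early_munmap(void)
{
	struct vas_tx_win_open_attr attr = { .version = 1, .vas_id = -1 };
	size_t len = (size_t)getpagesize();
	int fd = open("/dev/crypto/nx-gzip", O_RDWR);
	void *paste;

	ioctl(fd, VAS_TX_WIN_OPEN, &attr);

	/* Offset must be 0: the paste region is a single page. */
	paste = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	munmap(paste, len);	/* VMA is freed, vas_mmap_close() clears it */
	close(fd);		/* the window itself is torn down afterwards */
}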
+ */ + if (WARN_ON(txwin->task_ref.vma != vma)) { + pr_err("Invalid paste address mmaping\n"); + return; + } + + mutex_lock(&txwin->task_ref.mmap_mutex); + txwin->task_ref.vma = NULL; + mutex_unlock(&txwin->task_ref.mmap_mutex); +} + static const struct vm_operations_struct vas_vm_ops = { + .close = vas_mmap_close, .fault = vas_mmap_fault, }; @@ -481,19 +516,28 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma) txwin = cp_inst->txwin; if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { - pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__, + pr_debug("size 0x%zx, PAGE_SIZE 0x%zx\n", (vma->vm_end - vma->vm_start), PAGE_SIZE); return -EINVAL; } + /* + * Map complete page to the paste address. So the user + * space should pass 0ULL to the offset parameter. + */ + if (vma->vm_pgoff) { + pr_debug("Page offset unsupported to map paste address\n"); + return -EINVAL; + } + /* Ensure instance has an open send window */ if (!txwin) { - pr_err("%s(): No send window open?\n", __func__); + pr_err("No send window open?\n"); return -EINVAL; } if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) { - pr_err("%s(): VAS API is not registered\n", __func__); + pr_err("VAS API is not registered\n"); return -EACCES; } @@ -510,14 +554,14 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma) */ mutex_lock(&txwin->task_ref.mmap_mutex); if (txwin->status != VAS_WIN_ACTIVE) { - pr_err("%s(): Window is not active\n", __func__); + pr_err("Window is not active\n"); rc = -EACCES; goto out; } paste_addr = cp_inst->coproc->vops->paste_addr(txwin); if (!paste_addr) { - pr_err("%s(): Window paste address failed\n", __func__); + pr_err("Window paste address failed\n"); rc = -EINVAL; goto out; } @@ -525,7 +569,7 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma) pfn = paste_addr >> PAGE_SHIFT; /* flags, page_prot from cxl_mmap(), except we want cachable */ - vma->vm_flags |= VM_IO | VM_PFNMAP; + vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY); @@ -533,8 +577,8 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma) rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, vma->vm_end - vma->vm_start, prot); - pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__, - paste_addr, vma->vm_start, rc); + pr_devel("paste addr %llx at %lx, rc %d\n", paste_addr, + vma->vm_start, rc); txwin->task_ref.vma = vma; vma->vm_ops = &vas_vm_ops; @@ -581,7 +625,7 @@ int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type, pr_devel("%s device allocated, dev [%i,%i]\n", name, MAJOR(coproc_device.devt), MINOR(coproc_device.devt)); - coproc_device.class = class_create(mod, name); + coproc_device.class = class_create(name); if (IS_ERR(coproc_device.class)) { rc = PTR_ERR(coproc_device.class); pr_err("Unable to create %s class %d\n", name, rc); @@ -609,8 +653,7 @@ int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type, goto err; } - pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno), - MINOR(devno)); + pr_devel("Added dev [%d,%d]\n", MAJOR(devno), MINOR(devno)); return 0; diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 34669b060f36..db65bfcd1e74 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -3,42 +3,6 @@ config PPC_CELL select PPC_64S_HASH_MMU if PPC64 bool -config PPC_CELL_COMMON - bool - select PPC_CELL - select 
PPC_DCR_MMIO - select PPC_INDIRECT_PIO - select PPC_INDIRECT_MMIO - select PPC_HASH_MMU_NATIVE - select PPC_RTAS - select IRQ_EDGE_EOI_HANDLER - -config PPC_CELL_NATIVE - bool - select PPC_CELL_COMMON - select MPIC - select PPC_IO_WORKAROUNDS - select IBM_EMAC_EMAC4 if IBM_EMAC - select IBM_EMAC_RGMII if IBM_EMAC - select IBM_EMAC_ZMII if IBM_EMAC #test only - select IBM_EMAC_TAH if IBM_EMAC #test only - -config PPC_IBM_CELL_BLADE - bool "IBM Cell Blade" - depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN - select PPC_CELL_NATIVE - select PPC_OF_PLATFORM_PCI - select FORCE_PCI - select MMIO_NVRAM - select PPC_UDBG_16550 - select UDBG_RTAS_CONSOLE - -config AXON_MSI - bool - depends on PPC_IBM_CELL_BLADE && PCI_MSI - select IRQ_DOMAIN_NOMAP - default y - menu "Cell Broadband Engine options" depends on PPC_CELL @@ -57,48 +21,4 @@ config SPU_BASE bool select PPC_COPRO_BASE -config CBE_RAS - bool "RAS features for bare metal Cell BE" - depends on PPC_CELL_NATIVE - default y - -config PPC_IBM_CELL_RESETBUTTON - bool "IBM Cell Blade Pinhole reset button" - depends on CBE_RAS && PPC_IBM_CELL_BLADE - default y - help - Support Pinhole Resetbutton on IBM Cell blades. - This adds a method to trigger system reset via front panel pinhole button. - -config PPC_IBM_CELL_POWERBUTTON - tristate "IBM Cell Blade power button" - depends on PPC_IBM_CELL_BLADE && INPUT_EVDEV - default y - help - Support Powerbutton on IBM Cell blades. - This will enable the powerbutton as an input device. - -config CBE_THERM - tristate "CBE thermal support" - default m - depends on CBE_RAS && SPU_BASE - -config PPC_PMI - tristate - default y - depends on CPU_FREQ_CBE_PMI || PPC_IBM_CELL_POWERBUTTON - help - PMI (Platform Management Interrupt) is a way to - communicate with the BMC (Baseboard Management Controller). - It is used in some IBM Cell blades. - -config CBE_CPUFREQ_SPU_GOVERNOR - tristate "CBE frequency scaling based on SPU usage" - depends on SPU_FS && CPU_FREQ - default m - help - This governor checks for spu usage to adjust the cpu frequency. - If no spu is running on a given cpu, that cpu will be throttled to - the minimal possible frequency. - endmenu diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile index 7ea6692f67e2..7e5ff239c376 100644 --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile @@ -1,27 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o - -obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ - pmu.o spider-pci.o -obj-$(CONFIG_CBE_RAS) += ras.o - -obj-$(CONFIG_CBE_THERM) += cbe_thermal.o -obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o - -obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o - -ifdef CONFIG_SMP -obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o -endif - -# needed only when building loadable spufs.ko -spu-priv1-$(CONFIG_PPC_CELL_COMMON) += spu_priv1_mmio.o -spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o - obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \ spu_syscalls.o \ - $(spu-priv1-y) \ - $(spu-manage-y) \ spufs/ - -obj-$(CONFIG_AXON_MSI) += axon_msi.o diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c deleted file mode 100644 index f3291e957a19..000000000000 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ /dev/null @@ -1,479 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2007, Michael Ellerman, IBM Corporation. 
- */ - - -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/msi.h> -#include <linux/export.h> -#include <linux/of_platform.h> -#include <linux/slab.h> -#include <linux/debugfs.h> -#include <linux/of_irq.h> - -#include <asm/dcr.h> -#include <asm/machdep.h> - -#include "cell.h" - -/* - * MSIC registers, specified as offsets from dcr_base - */ -#define MSIC_CTRL_REG 0x0 - -/* Base Address registers specify FIFO location in BE memory */ -#define MSIC_BASE_ADDR_HI_REG 0x3 -#define MSIC_BASE_ADDR_LO_REG 0x4 - -/* Hold the read/write offsets into the FIFO */ -#define MSIC_READ_OFFSET_REG 0x5 -#define MSIC_WRITE_OFFSET_REG 0x6 - - -/* MSIC control register flags */ -#define MSIC_CTRL_ENABLE 0x0001 -#define MSIC_CTRL_FIFO_FULL_ENABLE 0x0002 -#define MSIC_CTRL_IRQ_ENABLE 0x0008 -#define MSIC_CTRL_FULL_STOP_ENABLE 0x0010 - -/* - * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB. - * Currently we're using a 64KB FIFO size. - */ -#define MSIC_FIFO_SIZE_SHIFT 16 -#define MSIC_FIFO_SIZE_BYTES (1 << MSIC_FIFO_SIZE_SHIFT) - -/* - * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits - * 8-9 of the MSIC control reg. - */ -#define MSIC_CTRL_FIFO_SIZE (((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300) - -/* - * We need to mask the read/write offsets to make sure they stay within - * the bounds of the FIFO. Also they should always be 16-byte aligned. - */ -#define MSIC_FIFO_SIZE_MASK ((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu) - -/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */ -#define MSIC_FIFO_ENTRY_SIZE 0x10 - - -struct axon_msic { - struct irq_domain *irq_domain; - __le32 *fifo_virt; - dma_addr_t fifo_phys; - dcr_host_t dcr_host; - u32 read_offset; -#ifdef DEBUG - u32 __iomem *trigger; -#endif -}; - -#ifdef DEBUG -void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic); -#else -static inline void axon_msi_debug_setup(struct device_node *dn, - struct axon_msic *msic) { } -#endif - - -static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) -{ - pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n); - - dcr_write(msic->dcr_host, dcr_n, val); -} - -static void axon_msi_cascade(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct axon_msic *msic = irq_desc_get_handler_data(desc); - u32 write_offset, msi; - int idx; - int retry = 0; - - write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG); - pr_devel("axon_msi: original write_offset 0x%x\n", write_offset); - - /* write_offset doesn't wrap properly, so we have to mask it */ - write_offset &= MSIC_FIFO_SIZE_MASK; - - while (msic->read_offset != write_offset && retry < 100) { - idx = msic->read_offset / sizeof(__le32); - msi = le32_to_cpu(msic->fifo_virt[idx]); - msi &= 0xFFFF; - - pr_devel("axon_msi: woff %x roff %x msi %x\n", - write_offset, msic->read_offset, msi); - - if (msi < nr_irqs && irq_get_chip_data(msi) == msic) { - generic_handle_irq(msi); - msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); - } else { - /* - * Reading the MSIC_WRITE_OFFSET_REG does not - * reliably flush the outstanding DMA to the - * FIFO buffer. Here we were reading stale - * data, so we need to retry. 
- */ - udelay(1); - retry++; - pr_devel("axon_msi: invalid irq 0x%x!\n", msi); - continue; - } - - if (retry) { - pr_devel("axon_msi: late irq 0x%x, retry %d\n", - msi, retry); - retry = 0; - } - - msic->read_offset += MSIC_FIFO_ENTRY_SIZE; - msic->read_offset &= MSIC_FIFO_SIZE_MASK; - } - - if (retry) { - printk(KERN_WARNING "axon_msi: irq timed out\n"); - - msic->read_offset += MSIC_FIFO_ENTRY_SIZE; - msic->read_offset &= MSIC_FIFO_SIZE_MASK; - } - - chip->irq_eoi(&desc->irq_data); -} - -static struct axon_msic *find_msi_translator(struct pci_dev *dev) -{ - struct irq_domain *irq_domain; - struct device_node *dn, *tmp; - const phandle *ph; - struct axon_msic *msic = NULL; - - dn = of_node_get(pci_device_to_OF_node(dev)); - if (!dn) { - dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); - return NULL; - } - - for (; dn; dn = of_get_next_parent(dn)) { - ph = of_get_property(dn, "msi-translator", NULL); - if (ph) - break; - } - - if (!ph) { - dev_dbg(&dev->dev, - "axon_msi: no msi-translator property found\n"); - goto out_error; - } - - tmp = dn; - dn = of_find_node_by_phandle(*ph); - of_node_put(tmp); - if (!dn) { - dev_dbg(&dev->dev, - "axon_msi: msi-translator doesn't point to a node\n"); - goto out_error; - } - - irq_domain = irq_find_host(dn); - if (!irq_domain) { - dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n", - dn); - goto out_error; - } - - msic = irq_domain->host_data; - -out_error: - of_node_put(dn); - - return msic; -} - -static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg) -{ - struct device_node *dn; - int len; - const u32 *prop; - - dn = of_node_get(pci_device_to_OF_node(dev)); - if (!dn) { - dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); - return -ENODEV; - } - - for (; dn; dn = of_get_next_parent(dn)) { - if (!dev->no_64bit_msi) { - prop = of_get_property(dn, "msi-address-64", &len); - if (prop) - break; - } - - prop = of_get_property(dn, "msi-address-32", &len); - if (prop) - break; - } - - if (!prop) { - dev_dbg(&dev->dev, - "axon_msi: no msi-address-(32|64) properties found\n"); - return -ENOENT; - } - - switch (len) { - case 8: - msg->address_hi = prop[0]; - msg->address_lo = prop[1]; - break; - case 4: - msg->address_hi = 0; - msg->address_lo = prop[0]; - break; - default: - dev_dbg(&dev->dev, - "axon_msi: malformed msi-address-(32|64) property\n"); - of_node_put(dn); - return -EINVAL; - } - - of_node_put(dn); - - return 0; -} - -static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -{ - unsigned int virq, rc; - struct msi_desc *entry; - struct msi_msg msg; - struct axon_msic *msic; - - msic = find_msi_translator(dev); - if (!msic) - return -ENODEV; - - rc = setup_msi_msg_address(dev, &msg); - if (rc) - return rc; - - msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) { - virq = irq_create_direct_mapping(msic->irq_domain); - if (!virq) { - dev_warn(&dev->dev, - "axon_msi: virq allocation failed!\n"); - return -1; - } - dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); - - irq_set_msi_desc(virq, entry); - msg.data = virq; - pci_write_msi_msg(virq, &msg); - } - - return 0; -} - -static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) -{ - struct msi_desc *entry; - - dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n"); - - msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) { - irq_set_msi_desc(entry->irq, NULL); - irq_dispose_mapping(entry->irq); - } -} - -static struct irq_chip msic_irq_chip = { - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, - 
.irq_shutdown = pci_msi_mask_irq, - .name = "AXON-MSI", -}; - -static int msic_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - irq_set_chip_data(virq, h->host_data); - irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); - - return 0; -} - -static const struct irq_domain_ops msic_host_ops = { - .map = msic_host_map, -}; - -static void axon_msi_shutdown(struct platform_device *device) -{ - struct axon_msic *msic = dev_get_drvdata(&device->dev); - u32 tmp; - - pr_devel("axon_msi: disabling %pOF\n", - irq_domain_get_of_node(msic->irq_domain)); - tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); - tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; - msic_dcr_write(msic, MSIC_CTRL_REG, tmp); -} - -static int axon_msi_probe(struct platform_device *device) -{ - struct device_node *dn = device->dev.of_node; - struct axon_msic *msic; - unsigned int virq; - int dcr_base, dcr_len; - - pr_devel("axon_msi: setting up dn %pOF\n", dn); - - msic = kzalloc(sizeof(*msic), GFP_KERNEL); - if (!msic) { - printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n", - dn); - goto out; - } - - dcr_base = dcr_resource_start(dn, 0); - dcr_len = dcr_resource_len(dn, 0); - - if (dcr_base == 0 || dcr_len == 0) { - printk(KERN_ERR - "axon_msi: couldn't parse dcr properties on %pOF\n", - dn); - goto out_free_msic; - } - - msic->dcr_host = dcr_map(dn, dcr_base, dcr_len); - if (!DCR_MAP_OK(msic->dcr_host)) { - printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n", - dn); - goto out_free_msic; - } - - msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, - &msic->fifo_phys, GFP_KERNEL); - if (!msic->fifo_virt) { - printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n", - dn); - goto out_free_msic; - } - - virq = irq_of_parse_and_map(dn, 0); - if (!virq) { - printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n", - dn); - goto out_free_fifo; - } - memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); - - /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */ - msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic); - if (!msic->irq_domain) { - printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n", - dn); - goto out_free_fifo; - } - - irq_set_handler_data(virq, msic); - irq_set_chained_handler(virq, axon_msi_cascade); - pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); - - /* Enable the MSIC hardware */ - msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32); - msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG, - msic->fifo_phys & 0xFFFFFFFF); - msic_dcr_write(msic, MSIC_CTRL_REG, - MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE | - MSIC_CTRL_FIFO_SIZE); - - msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG) - & MSIC_FIFO_SIZE_MASK; - - dev_set_drvdata(&device->dev, msic); - - cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs; - cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs; - - axon_msi_debug_setup(dn, msic); - - printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn); - - return 0; - -out_free_fifo: - dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt, - msic->fifo_phys); -out_free_msic: - kfree(msic); -out: - - return -1; -} - -static const struct of_device_id axon_msi_device_id[] = { - { - .compatible = "ibm,axon-msic" - }, - {} -}; - -static struct platform_driver axon_msi_driver = { - .probe = axon_msi_probe, - .shutdown = axon_msi_shutdown, - .driver = { - .name = "axon-msi", - .of_match_table = 
axon_msi_device_id, - }, -}; - -static int __init axon_msi_init(void) -{ - return platform_driver_register(&axon_msi_driver); -} -subsys_initcall(axon_msi_init); - - -#ifdef DEBUG -static int msic_set(void *data, u64 val) -{ - struct axon_msic *msic = data; - out_le32(msic->trigger, val); - return 0; -} - -static int msic_get(void *data, u64 *val) -{ - *val = 0; - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n"); - -void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic) -{ - char name[8]; - u64 addr; - - addr = of_translate_address(dn, of_get_property(dn, "reg", NULL)); - if (addr == OF_BAD_ADDR) { - pr_devel("axon_msi: couldn't translate reg property\n"); - return; - } - - msic->trigger = ioremap(addr, 0x4); - if (!msic->trigger) { - pr_devel("axon_msi: ioremap failed\n"); - return; - } - - snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn)); - - debugfs_create_file(name, 0600, arch_debugfs_dir, msic, &fops_msic); -} -#endif /* DEBUG */ diff --git a/arch/powerpc/platforms/cell/cbe_powerbutton.c b/arch/powerpc/platforms/cell/cbe_powerbutton.c deleted file mode 100644 index a3ee397486f6..000000000000 --- a/arch/powerpc/platforms/cell/cbe_powerbutton.c +++ /dev/null @@ -1,105 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * driver for powerbutton on IBM cell blades - * - * (C) Copyright IBM Corp. 2005-2008 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/input.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/platform_device.h> -#include <asm/pmi.h> - -static struct input_dev *button_dev; -static struct platform_device *button_pdev; - -static void cbe_powerbutton_handle_pmi(pmi_message_t pmi_msg) -{ - BUG_ON(pmi_msg.type != PMI_TYPE_POWER_BUTTON); - - input_report_key(button_dev, KEY_POWER, 1); - input_sync(button_dev); - input_report_key(button_dev, KEY_POWER, 0); - input_sync(button_dev); -} - -static struct pmi_handler cbe_pmi_handler = { - .type = PMI_TYPE_POWER_BUTTON, - .handle_pmi_message = cbe_powerbutton_handle_pmi, -}; - -static int __init cbe_powerbutton_init(void) -{ - int ret = 0; - struct input_dev *dev; - - if (!of_machine_is_compatible("IBM,CBPLUS-1.0")) { - printk(KERN_ERR "%s: Not a cell blade.\n", __func__); - ret = -ENODEV; - goto out; - } - - dev = input_allocate_device(); - if (!dev) { - ret = -ENOMEM; - printk(KERN_ERR "%s: Not enough memory.\n", __func__); - goto out; - } - - set_bit(EV_KEY, dev->evbit); - set_bit(KEY_POWER, dev->keybit); - - dev->name = "Power Button"; - dev->id.bustype = BUS_HOST; - - /* this makes the button look like an acpi power button - * no clue whether anyone relies on that though */ - dev->id.product = 0x02; - dev->phys = "LNXPWRBN/button/input0"; - - button_pdev = platform_device_register_simple("power_button", 0, NULL, 0); - if (IS_ERR(button_pdev)) { - ret = PTR_ERR(button_pdev); - goto out_free_input; - } - - dev->dev.parent = &button_pdev->dev; - ret = input_register_device(dev); - if (ret) { - printk(KERN_ERR "%s: Failed to register device\n", __func__); - goto out_free_pdev; - } - - button_dev = dev; - - ret = pmi_register_handler(&cbe_pmi_handler); - if (ret) { - printk(KERN_ERR "%s: Failed to register with pmi.\n", __func__); - goto out_free_pdev; - } - - goto out; - -out_free_pdev: - platform_device_unregister(button_pdev); -out_free_input: - input_free_device(dev); -out: - return ret; -} - -static void __exit cbe_powerbutton_exit(void) -{ - pmi_unregister_handler(&cbe_pmi_handler); - 
platform_device_unregister(button_pdev); - input_free_device(button_dev); -} - -module_init(cbe_powerbutton_init); -module_exit(cbe_powerbutton_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c deleted file mode 100644 index 316e533afc00..000000000000 --- a/arch/powerpc/platforms/cell/cbe_regs.c +++ /dev/null @@ -1,282 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * cbe_regs.c - * - * Accessor routines for the various MMIO register blocks of the CBE - * - * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. - */ - -#include <linux/percpu.h> -#include <linux/types.h> -#include <linux/export.h> -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> -#include <linux/pgtable.h> - -#include <asm/io.h> -#include <asm/ptrace.h> -#include <asm/cell-regs.h> - -/* - * Current implementation uses "cpu" nodes. We build our own mapping - * array of cpu numbers to cpu nodes locally for now to allow interrupt - * time code to have a fast path rather than call of_get_cpu_node(). If - * we implement cpu hotplug, we'll have to install an appropriate notifier - * in order to release references to the cpu going away - */ -static struct cbe_regs_map -{ - struct device_node *cpu_node; - struct device_node *be_node; - struct cbe_pmd_regs __iomem *pmd_regs; - struct cbe_iic_regs __iomem *iic_regs; - struct cbe_mic_tm_regs __iomem *mic_tm_regs; - struct cbe_pmd_shadow_regs pmd_shadow_regs; -} cbe_regs_maps[MAX_CBE]; -static int cbe_regs_map_count; - -static struct cbe_thread_map -{ - struct device_node *cpu_node; - struct device_node *be_node; - struct cbe_regs_map *regs; - unsigned int thread_id; - unsigned int cbe_id; -} cbe_thread_map[NR_CPUS]; - -static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... 
MAX_CBE-1] = {CPU_BITS_NONE} }; -static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE }; - -static struct cbe_regs_map *cbe_find_map(struct device_node *np) -{ - int i; - struct device_node *tmp_np; - - if (!of_node_is_type(np, "spe")) { - for (i = 0; i < cbe_regs_map_count; i++) - if (cbe_regs_maps[i].cpu_node == np || - cbe_regs_maps[i].be_node == np) - return &cbe_regs_maps[i]; - return NULL; - } - - if (np->data) - return np->data; - - /* walk up path until cpu or be node was found */ - tmp_np = np; - do { - tmp_np = tmp_np->parent; - /* on a correct devicetree we wont get up to root */ - BUG_ON(!tmp_np); - } while (!of_node_is_type(tmp_np, "cpu") || - !of_node_is_type(tmp_np, "be")); - - np->data = cbe_find_map(tmp_np); - - return np->data; -} - -struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np) -{ - struct cbe_regs_map *map = cbe_find_map(np); - if (map == NULL) - return NULL; - return map->pmd_regs; -} -EXPORT_SYMBOL_GPL(cbe_get_pmd_regs); - -struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu) -{ - struct cbe_regs_map *map = cbe_thread_map[cpu].regs; - if (map == NULL) - return NULL; - return map->pmd_regs; -} -EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs); - -struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np) -{ - struct cbe_regs_map *map = cbe_find_map(np); - if (map == NULL) - return NULL; - return &map->pmd_shadow_regs; -} - -struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu) -{ - struct cbe_regs_map *map = cbe_thread_map[cpu].regs; - if (map == NULL) - return NULL; - return &map->pmd_shadow_regs; -} - -struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np) -{ - struct cbe_regs_map *map = cbe_find_map(np); - if (map == NULL) - return NULL; - return map->iic_regs; -} - -struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu) -{ - struct cbe_regs_map *map = cbe_thread_map[cpu].regs; - if (map == NULL) - return NULL; - return map->iic_regs; -} - -struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np) -{ - struct cbe_regs_map *map = cbe_find_map(np); - if (map == NULL) - return NULL; - return map->mic_tm_regs; -} - -struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu) -{ - struct cbe_regs_map *map = cbe_thread_map[cpu].regs; - if (map == NULL) - return NULL; - return map->mic_tm_regs; -} -EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs); - -u32 cbe_get_hw_thread_id(int cpu) -{ - return cbe_thread_map[cpu].thread_id; -} -EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id); - -u32 cbe_cpu_to_node(int cpu) -{ - return cbe_thread_map[cpu].cbe_id; -} -EXPORT_SYMBOL_GPL(cbe_cpu_to_node); - -u32 cbe_node_to_cpu(int node) -{ - return cpumask_first(&cbe_local_mask[node]); - -} -EXPORT_SYMBOL_GPL(cbe_node_to_cpu); - -static struct device_node *__init cbe_get_be_node(int cpu_id) -{ - struct device_node *np; - - for_each_node_by_type (np, "be") { - int len,i; - const phandle *cpu_handle; - - cpu_handle = of_get_property(np, "cpus", &len); - - /* - * the CAB SLOF tree is non compliant, so we just assume - * there is only one node - */ - if (WARN_ON_ONCE(!cpu_handle)) - return np; - - for (i=0; i<len; i++) - if (of_find_node_by_phandle(cpu_handle[i]) == of_get_cpu_node(cpu_id, NULL)) - return np; - } - - return NULL; -} - -static void __init cbe_fill_regs_map(struct cbe_regs_map *map) -{ - if(map->be_node) { - struct device_node *be, *np; - - be = map->be_node; - - for_each_node_by_type(np, "pervasive") - if (of_get_parent(np) == be) - map->pmd_regs = of_iomap(np, 0); - - for_each_node_by_type(np, 
"CBEA-Internal-Interrupt-Controller") - if (of_get_parent(np) == be) - map->iic_regs = of_iomap(np, 2); - - for_each_node_by_type(np, "mic-tm") - if (of_get_parent(np) == be) - map->mic_tm_regs = of_iomap(np, 0); - } else { - struct device_node *cpu; - /* That hack must die die die ! */ - const struct address_prop { - unsigned long address; - unsigned int len; - } __attribute__((packed)) *prop; - - cpu = map->cpu_node; - - prop = of_get_property(cpu, "pervasive", NULL); - if (prop != NULL) - map->pmd_regs = ioremap(prop->address, prop->len); - - prop = of_get_property(cpu, "iic", NULL); - if (prop != NULL) - map->iic_regs = ioremap(prop->address, prop->len); - - prop = of_get_property(cpu, "mic-tm", NULL); - if (prop != NULL) - map->mic_tm_regs = ioremap(prop->address, prop->len); - } -} - - -void __init cbe_regs_init(void) -{ - int i; - unsigned int thread_id; - struct device_node *cpu; - - /* Build local fast map of CPUs */ - for_each_possible_cpu(i) { - cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id); - cbe_thread_map[i].be_node = cbe_get_be_node(i); - cbe_thread_map[i].thread_id = thread_id; - } - - /* Find maps for each device tree CPU */ - for_each_node_by_type(cpu, "cpu") { - struct cbe_regs_map *map; - unsigned int cbe_id; - - cbe_id = cbe_regs_map_count++; - map = &cbe_regs_maps[cbe_id]; - - if (cbe_regs_map_count > MAX_CBE) { - printk(KERN_ERR "cbe_regs: More BE chips than supported" - "!\n"); - cbe_regs_map_count--; - of_node_put(cpu); - return; - } - map->cpu_node = cpu; - - for_each_possible_cpu(i) { - struct cbe_thread_map *thread = &cbe_thread_map[i]; - - if (thread->cpu_node == cpu) { - thread->regs = map; - thread->cbe_id = cbe_id; - map->be_node = thread->be_node; - cpumask_set_cpu(i, &cbe_local_mask[cbe_id]); - if(thread->thread_id == 0) - cpumask_set_cpu(i, &cbe_first_online_cpu); - } - } - - cbe_fill_regs_map(map); - } -} - diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c deleted file mode 100644 index 2f45428e32c8..000000000000 --- a/arch/powerpc/platforms/cell/cbe_thermal.c +++ /dev/null @@ -1,386 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * thermal support for the cell processor - * - * This module adds some sysfs attributes to cpu and spu nodes. - * Base for measurements are the digital thermal sensors (DTS) - * located on the chip. - * The accuracy is 2 degrees, starting from 65 up to 125 degrees celsius - * The attributes can be found under - * /sys/devices/system/cpu/cpuX/thermal - * /sys/devices/system/spu/spuX/thermal - * - * The following attributes are added for each node: - * temperature: - * contains the current temperature measured by the DTS - * throttle_begin: - * throttling begins when temperature is greater or equal to - * throttle_begin. Setting this value to 125 prevents throttling. - * throttle_end: - * throttling is being ceased, if the temperature is lower than - * throttle_end. Due to a delay between applying throttling and - * a reduced temperature this value should be less than throttle_begin. - * A value equal to throttle_begin provides only a very little hysteresis. - * throttle_full_stop: - * If the temperatrue is greater or equal to throttle_full_stop, - * full throttling is applied to the cpu or spu. This value should be - * greater than throttle_begin and throttle_end. Setting this value to - * 65 prevents the unit from running code at all. 
- * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/module.h> -#include <linux/device.h> -#include <linux/kernel.h> -#include <linux/cpu.h> -#include <linux/stringify.h> -#include <asm/spu.h> -#include <asm/io.h> -#include <asm/cell-regs.h> - -#include "spu_priv1_mmio.h" - -#define TEMP_MIN 65 -#define TEMP_MAX 125 - -#define DEVICE_PREFIX_ATTR(_prefix,_name,_mode) \ -struct device_attribute attr_ ## _prefix ## _ ## _name = { \ - .attr = { .name = __stringify(_name), .mode = _mode }, \ - .show = _prefix ## _show_ ## _name, \ - .store = _prefix ## _store_ ## _name, \ -}; - -static inline u8 reg_to_temp(u8 reg_value) -{ - return ((reg_value & 0x3f) << 1) + TEMP_MIN; -} - -static inline u8 temp_to_reg(u8 temp) -{ - return ((temp - TEMP_MIN) >> 1) & 0x3f; -} - -static struct cbe_pmd_regs __iomem *get_pmd_regs(struct device *dev) -{ - struct spu *spu; - - spu = container_of(dev, struct spu, dev); - - return cbe_get_pmd_regs(spu_devnode(spu)); -} - -/* returns the value for a given spu in a given register */ -static u8 spu_read_register_value(struct device *dev, union spe_reg __iomem *reg) -{ - union spe_reg value; - struct spu *spu; - - spu = container_of(dev, struct spu, dev); - value.val = in_be64(®->val); - - return value.spe[spu->spe_id]; -} - -static ssize_t spu_show_temp(struct device *dev, struct device_attribute *attr, - char *buf) -{ - u8 value; - struct cbe_pmd_regs __iomem *pmd_regs; - - pmd_regs = get_pmd_regs(dev); - - value = spu_read_register_value(dev, &pmd_regs->ts_ctsr1); - - return sprintf(buf, "%d\n", reg_to_temp(value)); -} - -static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, int pos) -{ - u64 value; - - value = in_be64(&pmd_regs->tm_tpr.val); - /* access the corresponding byte */ - value >>= pos; - value &= 0x3F; - - return sprintf(buf, "%d\n", reg_to_temp(value)); -} - -static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos) -{ - u64 reg_value; - unsigned int temp; - u64 new_value; - int ret; - - ret = sscanf(buf, "%u", &temp); - - if (ret != 1 || temp < TEMP_MIN || temp > TEMP_MAX) - return -EINVAL; - - new_value = temp_to_reg(temp); - - reg_value = in_be64(&pmd_regs->tm_tpr.val); - - /* zero out bits for new value */ - reg_value &= ~(0xffull << pos); - /* set bits to new value */ - reg_value |= new_value << pos; - - out_be64(&pmd_regs->tm_tpr.val, reg_value); - return size; -} - -static ssize_t spu_show_throttle_end(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(get_pmd_regs(dev), buf, 0); -} - -static ssize_t spu_show_throttle_begin(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(get_pmd_regs(dev), buf, 8); -} - -static ssize_t spu_show_throttle_full_stop(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(get_pmd_regs(dev), buf, 16); -} - -static ssize_t spu_store_throttle_end(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(get_pmd_regs(dev), buf, size, 0); -} - -static ssize_t spu_store_throttle_begin(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(get_pmd_regs(dev), buf, size, 8); -} - -static ssize_t spu_store_throttle_full_stop(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(get_pmd_regs(dev), buf, 
size, 16); -} - -static ssize_t ppe_show_temp(struct device *dev, char *buf, int pos) -{ - struct cbe_pmd_regs __iomem *pmd_regs; - u64 value; - - pmd_regs = cbe_get_cpu_pmd_regs(dev->id); - value = in_be64(&pmd_regs->ts_ctsr2); - - value = (value >> pos) & 0x3f; - - return sprintf(buf, "%d\n", reg_to_temp(value)); -} - - -/* shows the temperature of the DTS on the PPE, - * located near the linear thermal sensor */ -static ssize_t ppe_show_temp0(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return ppe_show_temp(dev, buf, 32); -} - -/* shows the temperature of the second DTS on the PPE */ -static ssize_t ppe_show_temp1(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return ppe_show_temp(dev, buf, 0); -} - -static ssize_t ppe_show_throttle_end(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 32); -} - -static ssize_t ppe_show_throttle_begin(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 40); -} - -static ssize_t ppe_show_throttle_full_stop(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 48); -} - -static ssize_t ppe_store_throttle_end(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 32); -} - -static ssize_t ppe_store_throttle_begin(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 40); -} - -static ssize_t ppe_store_throttle_full_stop(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 48); -} - - -static struct device_attribute attr_spu_temperature = { - .attr = {.name = "temperature", .mode = 0400 }, - .show = spu_show_temp, -}; - -static DEVICE_PREFIX_ATTR(spu, throttle_end, 0600); -static DEVICE_PREFIX_ATTR(spu, throttle_begin, 0600); -static DEVICE_PREFIX_ATTR(spu, throttle_full_stop, 0600); - - -static struct attribute *spu_attributes[] = { - &attr_spu_temperature.attr, - &attr_spu_throttle_end.attr, - &attr_spu_throttle_begin.attr, - &attr_spu_throttle_full_stop.attr, - NULL, -}; - -static const struct attribute_group spu_attribute_group = { - .name = "thermal", - .attrs = spu_attributes, -}; - -static struct device_attribute attr_ppe_temperature0 = { - .attr = {.name = "temperature0", .mode = 0400 }, - .show = ppe_show_temp0, -}; - -static struct device_attribute attr_ppe_temperature1 = { - .attr = {.name = "temperature1", .mode = 0400 }, - .show = ppe_show_temp1, -}; - -static DEVICE_PREFIX_ATTR(ppe, throttle_end, 0600); -static DEVICE_PREFIX_ATTR(ppe, throttle_begin, 0600); -static DEVICE_PREFIX_ATTR(ppe, throttle_full_stop, 0600); - -static struct attribute *ppe_attributes[] = { - &attr_ppe_temperature0.attr, - &attr_ppe_temperature1.attr, - &attr_ppe_throttle_end.attr, - &attr_ppe_throttle_begin.attr, - &attr_ppe_throttle_full_stop.attr, - NULL, -}; - -static struct attribute_group ppe_attribute_group = { - .name = "thermal", - .attrs = ppe_attributes, -}; - -/* - * initialize throttling with default values - */ -static int __init init_default_values(void) -{ - int cpu; - struct cbe_pmd_regs __iomem *pmd_regs; - struct device *dev; - union ppe_spe_reg tpr; - union spe_reg str1; - u64 str2; - union spe_reg 
cr1; - u64 cr2; - - /* TPR defaults */ - /* ppe - * 1F - no full stop - * 08 - dynamic throttling starts if over 80 degrees - * 03 - dynamic throttling ceases if below 70 degrees */ - tpr.ppe = 0x1F0803; - /* spe - * 10 - full stopped when over 96 degrees - * 08 - dynamic throttling starts if over 80 degrees - * 03 - dynamic throttling ceases if below 70 degrees - */ - tpr.spe = 0x100803; - - /* STR defaults */ - /* str1 - * 10 - stop 16 of 32 cycles - */ - str1.val = 0x1010101010101010ull; - /* str2 - * 10 - stop 16 of 32 cycles - */ - str2 = 0x10; - - /* CR defaults */ - /* cr1 - * 4 - normal operation - */ - cr1.val = 0x0404040404040404ull; - /* cr2 - * 4 - normal operation - */ - cr2 = 0x04; - - for_each_possible_cpu (cpu) { - pr_debug("processing cpu %d\n", cpu); - dev = get_cpu_device(cpu); - - if (!dev) { - pr_info("invalid dev pointer for cbe_thermal\n"); - return -EINVAL; - } - - pmd_regs = cbe_get_cpu_pmd_regs(dev->id); - - if (!pmd_regs) { - pr_info("invalid CBE regs pointer for cbe_thermal\n"); - return -EINVAL; - } - - out_be64(&pmd_regs->tm_str2, str2); - out_be64(&pmd_regs->tm_str1.val, str1.val); - out_be64(&pmd_regs->tm_tpr.val, tpr.val); - out_be64(&pmd_regs->tm_cr1.val, cr1.val); - out_be64(&pmd_regs->tm_cr2, cr2); - } - - return 0; -} - - -static int __init thermal_init(void) -{ - int rc = init_default_values(); - - if (rc == 0) { - spu_add_dev_attr_group(&spu_attribute_group); - cpu_add_dev_attr_group(&ppe_attribute_group); - } - - return rc; -} -module_init(thermal_init); - -static void __exit thermal_exit(void) -{ - spu_remove_dev_attr_group(&spu_attribute_group); - cpu_remove_dev_attr_group(&ppe_attribute_group); -} -module_exit(thermal_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); - diff --git a/arch/powerpc/platforms/cell/cell.h b/arch/powerpc/platforms/cell/cell.h deleted file mode 100644 index d5142e905ab3..000000000000 --- a/arch/powerpc/platforms/cell/cell.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Cell Platform common data structures - * - * Copyright 2015, Daniel Axtens, IBM Corporation - */ - -#ifndef CELL_H -#define CELL_H - -#include <asm/pci-bridge.h> - -extern struct pci_controller_ops cell_pci_controller_ops; - -#endif diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c deleted file mode 100644 index ca7849e113d7..000000000000 --- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c +++ /dev/null @@ -1,133 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * spu aware cpufreq governor for the cell processor - * - * © Copyright IBM Corporation 2006-2008 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/cpufreq.h> -#include <linux/sched.h> -#include <linux/sched/loadavg.h> -#include <linux/module.h> -#include <linux/timer.h> -#include <linux/workqueue.h> -#include <linux/atomic.h> -#include <asm/machdep.h> -#include <asm/spu.h> - -#define POLL_TIME 100000 /* in µs */ -#define EXP 753 /* exp(-1) in fixed-point */ - -struct spu_gov_info_struct { - unsigned long busy_spus; /* fixed-point */ - struct cpufreq_policy *policy; - struct delayed_work work; - unsigned int poll_int; /* µs */ -}; -static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info); - -static int calc_freq(struct spu_gov_info_struct *info) -{ - int cpu; - int busy_spus; - - cpu = info->policy->cpu; - busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus); - - info->busy_spus = 
calc_load(info->busy_spus, EXP, busy_spus * FIXED_1); - pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n", - cpu, busy_spus, info->busy_spus); - - return info->policy->max * info->busy_spus / FIXED_1; -} - -static void spu_gov_work(struct work_struct *work) -{ - struct spu_gov_info_struct *info; - int delay; - unsigned long target_freq; - - info = container_of(work, struct spu_gov_info_struct, work.work); - - /* after cancel_delayed_work_sync we unset info->policy */ - BUG_ON(info->policy == NULL); - - target_freq = calc_freq(info); - __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H); - - delay = usecs_to_jiffies(info->poll_int); - schedule_delayed_work_on(info->policy->cpu, &info->work, delay); -} - -static void spu_gov_init_work(struct spu_gov_info_struct *info) -{ - int delay = usecs_to_jiffies(info->poll_int); - INIT_DEFERRABLE_WORK(&info->work, spu_gov_work); - schedule_delayed_work_on(info->policy->cpu, &info->work, delay); -} - -static void spu_gov_cancel_work(struct spu_gov_info_struct *info) -{ - cancel_delayed_work_sync(&info->work); -} - -static int spu_gov_start(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu); - struct spu_gov_info_struct *affected_info; - int i; - - if (!cpu_online(cpu)) { - printk(KERN_ERR "cpu %d is not online\n", cpu); - return -EINVAL; - } - - if (!policy->cur) { - printk(KERN_ERR "no cpu specified in policy\n"); - return -EINVAL; - } - - /* initialize spu_gov_info for all affected cpus */ - for_each_cpu(i, policy->cpus) { - affected_info = &per_cpu(spu_gov_info, i); - affected_info->policy = policy; - } - - info->poll_int = POLL_TIME; - - /* setup timer */ - spu_gov_init_work(info); - - return 0; -} - -static void spu_gov_stop(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu); - int i; - - /* cancel timer */ - spu_gov_cancel_work(info); - - /* clean spu_gov_info for all affected cpus */ - for_each_cpu (i, policy->cpus) { - info = &per_cpu(spu_gov_info, i); - info->policy = NULL; - } -} - -static struct cpufreq_governor spu_governor = { - .name = "spudemand", - .start = spu_gov_start, - .stop = spu_gov_stop, - .owner = THIS_MODULE, -}; -cpufreq_governor_init(spu_governor); -cpufreq_governor_exit(spu_governor); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c deleted file mode 100644 index 03ee8152ee97..000000000000 --- a/arch/powerpc/platforms/cell/interrupt.c +++ /dev/null @@ -1,390 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cell Internal Interrupt Controller - * - * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org) - * IBM, Corp. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * Author: Arnd Bergmann <arndb@de.ibm.com> - * - * TODO: - * - Fix various assumptions related to HW CPU numbers vs. 
linux CPU numbers - * vs node numbers in the setup code - * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from - * a non-active node to the active node) - */ - -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/irqdomain.h> -#include <linux/export.h> -#include <linux/percpu.h> -#include <linux/types.h> -#include <linux/ioport.h> -#include <linux/kernel_stat.h> -#include <linux/pgtable.h> -#include <linux/of_address.h> - -#include <asm/io.h> -#include <asm/ptrace.h> -#include <asm/machdep.h> -#include <asm/cell-regs.h> - -#include "interrupt.h" - -struct iic { - struct cbe_iic_thread_regs __iomem *regs; - u8 target_id; - u8 eoi_stack[16]; - int eoi_ptr; - struct device_node *node; -}; - -static DEFINE_PER_CPU(struct iic, cpu_iic); -#define IIC_NODE_COUNT 2 -static struct irq_domain *iic_host; - -/* Convert between "pending" bits and hw irq number */ -static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) -{ - unsigned char unit = bits.source & 0xf; - unsigned char node = bits.source >> 4; - unsigned char class = bits.class & 3; - - /* Decode IPIs */ - if (bits.flags & CBE_IIC_IRQ_IPI) - return IIC_IRQ_TYPE_IPI | (bits.prio >> 4); - else - return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit; -} - -static void iic_mask(struct irq_data *d) -{ -} - -static void iic_unmask(struct irq_data *d) -{ -} - -static void iic_eoi(struct irq_data *d) -{ - struct iic *iic = this_cpu_ptr(&cpu_iic); - out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); - BUG_ON(iic->eoi_ptr < 0); -} - -static struct irq_chip iic_chip = { - .name = "CELL-IIC", - .irq_mask = iic_mask, - .irq_unmask = iic_unmask, - .irq_eoi = iic_eoi, -}; - - -static void iic_ioexc_eoi(struct irq_data *d) -{ -} - -static void iic_ioexc_cascade(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct cbe_iic_regs __iomem *node_iic = - (void __iomem *)irq_desc_get_handler_data(desc); - unsigned int irq = irq_desc_get_irq(desc); - unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; - unsigned long bits, ack; - int cascade; - - for (;;) { - bits = in_be64(&node_iic->iic_is); - if (bits == 0) - break; - /* pre-ack edge interrupts */ - ack = bits & IIC_ISR_EDGE_MASK; - if (ack) - out_be64(&node_iic->iic_is, ack); - /* handle them */ - for (cascade = 63; cascade >= 0; cascade--) - if (bits & (0x8000000000000000UL >> cascade)) - generic_handle_domain_irq(iic_host, - base | cascade); - /* post-ack level interrupts */ - ack = bits & ~IIC_ISR_EDGE_MASK; - if (ack) - out_be64(&node_iic->iic_is, ack); - } - chip->irq_eoi(&desc->irq_data); -} - - -static struct irq_chip iic_ioexc_chip = { - .name = "CELL-IOEX", - .irq_mask = iic_mask, - .irq_unmask = iic_unmask, - .irq_eoi = iic_ioexc_eoi, -}; - -/* Get an IRQ number from the pending state register of the IIC */ -static unsigned int iic_get_irq(void) -{ - struct cbe_iic_pending_bits pending; - struct iic *iic; - unsigned int virq; - - iic = this_cpu_ptr(&cpu_iic); - *(unsigned long *) &pending = - in_be64((u64 __iomem *) &iic->regs->pending_destr); - if (!(pending.flags & CBE_IIC_IRQ_VALID)) - return 0; - virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending)); - if (!virq) - return 0; - iic->eoi_stack[++iic->eoi_ptr] = pending.prio; - BUG_ON(iic->eoi_ptr > 15); - return virq; -} - -void iic_setup_cpu(void) -{ - out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff); -} - -u8 iic_get_target_id(int cpu) -{ - return per_cpu(cpu_iic, cpu).target_id; -} - 
-EXPORT_SYMBOL_GPL(iic_get_target_id); - -#ifdef CONFIG_SMP - -/* Use the highest interrupt priorities for IPI */ -static inline int iic_msg_to_irq(int msg) -{ - return IIC_IRQ_TYPE_IPI + 0xf - msg; -} - -void iic_message_pass(int cpu, int msg) -{ - out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); -} - -static void iic_request_ipi(int msg) -{ - int virq; - - virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg)); - if (!virq) { - printk(KERN_ERR - "iic: failed to map IPI %s\n", smp_ipi_name[msg]); - return; - } - - /* - * If smp_request_message_ipi encounters an error it will notify - * the error. If a message is not needed it will return non-zero. - */ - if (smp_request_message_ipi(virq, msg)) - irq_dispose_mapping(virq); -} - -void iic_request_IPIs(void) -{ - iic_request_ipi(PPC_MSG_CALL_FUNCTION); - iic_request_ipi(PPC_MSG_RESCHEDULE); - iic_request_ipi(PPC_MSG_TICK_BROADCAST); - iic_request_ipi(PPC_MSG_NMI_IPI); -} - -#endif /* CONFIG_SMP */ - - -static int iic_host_match(struct irq_domain *h, struct device_node *node, - enum irq_domain_bus_token bus_token) -{ - return of_device_is_compatible(node, - "IBM,CBEA-Internal-Interrupt-Controller"); -} - -static int iic_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - switch (hw & IIC_IRQ_TYPE_MASK) { - case IIC_IRQ_TYPE_IPI: - irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq); - break; - case IIC_IRQ_TYPE_IOEXC: - irq_set_chip_and_handler(virq, &iic_ioexc_chip, - handle_edge_eoi_irq); - break; - default: - irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); - } - return 0; -} - -static int iic_host_xlate(struct irq_domain *h, struct device_node *ct, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_flags) - -{ - unsigned int node, ext, unit, class; - const u32 *val; - - if (!of_device_is_compatible(ct, - "IBM,CBEA-Internal-Interrupt-Controller")) - return -ENODEV; - if (intsize != 1) - return -ENODEV; - val = of_get_property(ct, "#interrupt-cells", NULL); - if (val == NULL || *val != 1) - return -ENODEV; - - node = intspec[0] >> 24; - ext = (intspec[0] >> 16) & 0xff; - class = (intspec[0] >> 8) & 0xff; - unit = intspec[0] & 0xff; - - /* Check if node is in supported range */ - if (node > 1) - return -EINVAL; - - /* Build up interrupt number, special case for IO exceptions */ - *out_hwirq = (node << IIC_IRQ_NODE_SHIFT); - if (unit == IIC_UNIT_IIC && class == 1) - *out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext; - else - *out_hwirq |= IIC_IRQ_TYPE_NORMAL | - (class << IIC_IRQ_CLASS_SHIFT) | unit; - - /* Dummy flags, ignored by iic code */ - *out_flags = IRQ_TYPE_EDGE_RISING; - - return 0; -} - -static const struct irq_domain_ops iic_host_ops = { - .match = iic_host_match, - .map = iic_host_map, - .xlate = iic_host_xlate, -}; - -static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr, - struct device_node *node) -{ - /* XXX FIXME: should locate the linux CPU number from the HW cpu - * number properly. We are lucky for now - */ - struct iic *iic = &per_cpu(cpu_iic, hw_cpu); - - iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs)); - BUG_ON(iic->regs == NULL); - - iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 
0xf : 0xe); - iic->eoi_stack[0] = 0xff; - iic->node = of_node_get(node); - out_be64(&iic->regs->prio, 0); - - printk(KERN_INFO "IIC for CPU %d target id 0x%x : %pOF\n", - hw_cpu, iic->target_id, node); -} - -static int __init setup_iic(void) -{ - struct device_node *dn; - struct resource r0, r1; - unsigned int node, cascade, found = 0; - struct cbe_iic_regs __iomem *node_iic; - const u32 *np; - - for_each_node_by_name(dn, "interrupt-controller") { - if (!of_device_is_compatible(dn, - "IBM,CBEA-Internal-Interrupt-Controller")) - continue; - np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL); - if (np == NULL) { - printk(KERN_WARNING "IIC: CPU association not found\n"); - of_node_put(dn); - return -ENODEV; - } - if (of_address_to_resource(dn, 0, &r0) || - of_address_to_resource(dn, 1, &r1)) { - printk(KERN_WARNING "IIC: Can't resolve addresses\n"); - of_node_put(dn); - return -ENODEV; - } - found++; - init_one_iic(np[0], r0.start, dn); - init_one_iic(np[1], r1.start, dn); - - /* Setup cascade for IO exceptions. XXX cleanup tricks to get - * node vs CPU etc... - * Note that we configure the IIC_IRR here with a hard coded - * priority of 1. We might want to improve that later. - */ - node = np[0] >> 1; - node_iic = cbe_get_cpu_iic_regs(np[0]); - cascade = node << IIC_IRQ_NODE_SHIFT; - cascade |= 1 << IIC_IRQ_CLASS_SHIFT; - cascade |= IIC_UNIT_IIC; - cascade = irq_create_mapping(iic_host, cascade); - if (!cascade) - continue; - /* - * irq_data is a generic pointer that gets passed back - * to us later, so the forced cast is fine. - */ - irq_set_handler_data(cascade, (void __force *)node_iic); - irq_set_chained_handler(cascade, iic_ioexc_cascade); - out_be64(&node_iic->iic_ir, - (1 << 12) /* priority */ | - (node << 4) /* dest node */ | - IIC_UNIT_THREAD_0 /* route them to thread 0 */); - /* Flush pending (make sure it triggers if there is - * anything pending - */ - out_be64(&node_iic->iic_is, 0xfffffffffffffffful); - } - - if (found) - return 0; - else - return -ENODEV; -} - -void __init iic_init_IRQ(void) -{ - /* Setup an irq host data structure */ - iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops, - NULL); - BUG_ON(iic_host == NULL); - irq_set_default_host(iic_host); - - /* Discover and initialize iics */ - if (setup_iic() < 0) - panic("IIC: Failed to initialize !\n"); - - /* Set master interrupt handling function */ - ppc_md.get_irq = iic_get_irq; - - /* Enable on current CPU */ - iic_setup_cpu(); -} - -void iic_set_interrupt_routing(int cpu, int thread, int priority) -{ - struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu); - u64 iic_ir = 0; - int node = cpu >> 1; - - /* Set which node and thread will handle the next interrupt */ - iic_ir |= CBE_IIC_IR_PRIO(priority) | - CBE_IIC_IR_DEST_NODE(node); - if (thread == 0) - iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0); - else - iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1); - out_be64(&iic_regs->iic_ir, iic_ir); -} diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h deleted file mode 100644 index a47902248541..000000000000 --- a/arch/powerpc/platforms/cell/interrupt.h +++ /dev/null @@ -1,90 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASM_CELL_PIC_H -#define ASM_CELL_PIC_H -#ifdef __KERNEL__ -/* - * Mapping of IIC pending bits into per-node interrupt numbers. - * - * Interrupt numbers are in the range 0...0x1ff where the top bit - * (0x100) represent the source node. 
Only 2 nodes are supported with - * the current code though it's trivial to extend that if necessary using - * higher level bits - * - * The bottom 8 bits are split into 2 type bits and 6 data bits that - * depend on the type: - * - * 00 (0x00 | data) : normal interrupt. data is (class << 4) | source - * 01 (0x40 | data) : IO exception. data is the exception number as - * defined by bit numbers in IIC_SR - * 10 (0x80 | data) : IPI. data is the IPI number (obtained from the priority) - * and node is always 0 (IPIs are per-cpu, their source is - * not relevant) - * 11 (0xc0 | data) : reserved - * - * In addition, interrupt number 0x80000000 is defined as always invalid - * (that is the node field is expected to never extend to move than 23 bits) - * - */ - -enum { - IIC_IRQ_INVALID = 0x80000000u, - IIC_IRQ_NODE_MASK = 0x100, - IIC_IRQ_NODE_SHIFT = 8, - IIC_IRQ_MAX = 0x1ff, - IIC_IRQ_TYPE_MASK = 0xc0, - IIC_IRQ_TYPE_NORMAL = 0x00, - IIC_IRQ_TYPE_IOEXC = 0x40, - IIC_IRQ_TYPE_IPI = 0x80, - IIC_IRQ_CLASS_SHIFT = 4, - IIC_IRQ_CLASS_0 = 0x00, - IIC_IRQ_CLASS_1 = 0x10, - IIC_IRQ_CLASS_2 = 0x20, - IIC_SOURCE_COUNT = 0x200, - - /* Here are defined the various source/dest units. Avoid using those - * definitions if you can, they are mostly here for reference - */ - IIC_UNIT_SPU_0 = 0x4, - IIC_UNIT_SPU_1 = 0x7, - IIC_UNIT_SPU_2 = 0x3, - IIC_UNIT_SPU_3 = 0x8, - IIC_UNIT_SPU_4 = 0x2, - IIC_UNIT_SPU_5 = 0x9, - IIC_UNIT_SPU_6 = 0x1, - IIC_UNIT_SPU_7 = 0xa, - IIC_UNIT_IOC_0 = 0x0, - IIC_UNIT_IOC_1 = 0xb, - IIC_UNIT_THREAD_0 = 0xe, /* target only */ - IIC_UNIT_THREAD_1 = 0xf, /* target only */ - IIC_UNIT_IIC = 0xe, /* source only (IO exceptions) */ - - /* Base numbers for the external interrupts */ - IIC_IRQ_EXT_IOIF0 = - IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_0, - IIC_IRQ_EXT_IOIF1 = - IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_1, - - /* Base numbers for the IIC_ISR interrupts */ - IIC_IRQ_IOEX_TMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 63, - IIC_IRQ_IOEX_PMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 62, - IIC_IRQ_IOEX_ATI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 61, - IIC_IRQ_IOEX_MATBFI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 60, - IIC_IRQ_IOEX_ELDI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 59, - - /* Which bits in IIC_ISR are edge sensitive */ - IIC_ISR_EDGE_MASK = 0x4ul, -}; - -extern void iic_init_IRQ(void); -extern void iic_message_pass(int cpu, int msg); -extern void iic_request_IPIs(void); -extern void iic_setup_cpu(void); - -extern u8 iic_get_target_id(int cpu); - -extern void spider_init_IRQ(void); - -extern void iic_set_interrupt_routing(int cpu, int thread, int priority); - -#endif -#endif /* ASM_CELL_PIC_H */ diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c deleted file mode 100644 index 0ca3efeef293..000000000000 --- a/arch/powerpc/platforms/cell/iommu.c +++ /dev/null @@ -1,1092 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * IOMMU implementation for Cell Broadband Processor Architecture - * - * (C) Copyright IBM Corporation 2006-2008 - * - * Author: Jeremy Kerr <jk@ozlabs.org> - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/irqdomain.h> -#include <linux/notifier.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_platform.h> -#include <linux/slab.h> -#include <linux/memblock.h> - -#include <asm/prom.h> -#include <asm/iommu.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <asm/udbg.h> -#include 
<asm/firmware.h> -#include <asm/cell-regs.h> - -#include "cell.h" -#include "interrupt.h" - -/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages - * instead of leaving them mapped to some dummy page. This can be - * enabled once the appropriate workarounds for spider bugs have - * been enabled - */ -#define CELL_IOMMU_REAL_UNMAP - -/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of - * IO PTEs based on the transfer direction. That can be enabled - * once spider-net has been fixed to pass the correct direction - * to the DMA mapping functions - */ -#define CELL_IOMMU_STRICT_PROTECTION - - -#define NR_IOMMUS 2 - -/* IOC mmap registers */ -#define IOC_Reg_Size 0x2000 - -#define IOC_IOPT_CacheInvd 0x908 -#define IOC_IOPT_CacheInvd_NE_Mask 0xffe0000000000000ul -#define IOC_IOPT_CacheInvd_IOPTE_Mask 0x000003fffffffff8ul -#define IOC_IOPT_CacheInvd_Busy 0x0000000000000001ul - -#define IOC_IOST_Origin 0x918 -#define IOC_IOST_Origin_E 0x8000000000000000ul -#define IOC_IOST_Origin_HW 0x0000000000000800ul -#define IOC_IOST_Origin_HL 0x0000000000000400ul - -#define IOC_IO_ExcpStat 0x920 -#define IOC_IO_ExcpStat_V 0x8000000000000000ul -#define IOC_IO_ExcpStat_SPF_Mask 0x6000000000000000ul -#define IOC_IO_ExcpStat_SPF_S 0x6000000000000000ul -#define IOC_IO_ExcpStat_SPF_P 0x2000000000000000ul -#define IOC_IO_ExcpStat_ADDR_Mask 0x00000007fffff000ul -#define IOC_IO_ExcpStat_RW_Mask 0x0000000000000800ul -#define IOC_IO_ExcpStat_IOID_Mask 0x00000000000007fful - -#define IOC_IO_ExcpMask 0x928 -#define IOC_IO_ExcpMask_SFE 0x4000000000000000ul -#define IOC_IO_ExcpMask_PFE 0x2000000000000000ul - -#define IOC_IOCmd_Offset 0x1000 - -#define IOC_IOCmd_Cfg 0xc00 -#define IOC_IOCmd_Cfg_TE 0x0000800000000000ul - - -/* Segment table entries */ -#define IOSTE_V 0x8000000000000000ul /* valid */ -#define IOSTE_H 0x4000000000000000ul /* cache hint */ -#define IOSTE_PT_Base_RPN_Mask 0x3ffffffffffff000ul /* base RPN of IOPT */ -#define IOSTE_NPPT_Mask 0x0000000000000fe0ul /* no. 
pages in IOPT */ -#define IOSTE_PS_Mask 0x0000000000000007ul /* page size */ -#define IOSTE_PS_4K 0x0000000000000001ul /* - 4kB */ -#define IOSTE_PS_64K 0x0000000000000003ul /* - 64kB */ -#define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */ -#define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */ - - -/* IOMMU sizing */ -#define IO_SEGMENT_SHIFT 28 -#define IO_PAGENO_BITS(shift) (IO_SEGMENT_SHIFT - (shift)) - -/* The high bit needs to be set on every DMA address */ -#define SPIDER_DMA_OFFSET 0x80000000ul - -struct iommu_window { - struct list_head list; - struct cbe_iommu *iommu; - unsigned long offset; - unsigned long size; - unsigned int ioid; - struct iommu_table table; -}; - -#define NAMESIZE 8 -struct cbe_iommu { - int nid; - char name[NAMESIZE]; - void __iomem *xlate_regs; - void __iomem *cmd_regs; - unsigned long *stab; - unsigned long *ptab; - void *pad_page; - struct list_head windows; -}; - -/* Static array of iommus, one per node - * each contains a list of windows, keyed from dma_window property - * - on bus setup, look for a matching window, or create one - * - on dev setup, assign iommu_table ptr - */ -static struct cbe_iommu iommus[NR_IOMMUS]; -static int cbe_nr_iommus; - -static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte, - long n_ptes) -{ - u64 __iomem *reg; - u64 val; - long n; - - reg = iommu->xlate_regs + IOC_IOPT_CacheInvd; - - while (n_ptes > 0) { - /* we can invalidate up to 1 << 11 PTEs at once */ - n = min(n_ptes, 1l << 11); - val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask) - | (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask) - | IOC_IOPT_CacheInvd_Busy; - - out_be64(reg, val); - while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy) - ; - - n_ptes -= n; - pte += n; - } -} - -static int tce_build_cell(struct iommu_table *tbl, long index, long npages, - unsigned long uaddr, enum dma_data_direction direction, - unsigned long attrs) -{ - int i; - unsigned long *io_pte, base_pte; - struct iommu_window *window = - container_of(tbl, struct iommu_window, table); - - /* implementing proper protection causes problems with the spidernet - * driver - check mapping directions later, but allow read & write by - * default for now.*/ -#ifdef CELL_IOMMU_STRICT_PROTECTION - /* to avoid referencing a global, we use a trick here to setup the - * protection bit. "prot" is setup to be 3 fields of 4 bits appended - * together for each of the 3 supported direction values. It is then - * shifted left so that the fields matching the desired direction - * lands on the appropriate bits, and other bits are masked out. 
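For illustration, the prot = 0xc48 value used just below packs one 4-bit permission field per DMA direction, and the << (52 + 4 * direction) shift selects which nibble lands on the read/write PTE bits. A minimal stand-alone sketch of that selection, assuming the conventional direction values (0 = bidirectional, 1 = to device, 2 = from device) and that the write and read permission bits are the two most significant PTE bits:

#include <stdio.h>
#include <stdint.h>

#define PP_W 0x8000000000000000ull	/* device may write memory */
#define PP_R 0x4000000000000000ull	/* device may read memory  */

int main(void)
{
	const uint64_t prot = 0xc48;	/* three 4-bit fields: 0xc, 0x4, 0x8 */
	int dir;

	for (dir = 0; dir < 3; dir++) {
		uint64_t pte = (prot << (52 + 4 * dir)) & (PP_W | PP_R);

		printf("direction %d -> %s%s\n", dir,
		       (pte & PP_R) ? "R" : "-",
		       (pte & PP_W) ? "W" : "-");
	}
	/* direction 0 (bidirectional) -> RW
	 * direction 1 (to device)     -> R-  (device only reads memory)
	 * direction 2 (from device)   -> -W  (device only writes memory) */
	return 0;
}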
- */ - const unsigned long prot = 0xc48; - base_pte = - ((prot << (52 + 4 * direction)) & - (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) | - CBE_IOPTE_M | CBE_IOPTE_SO_RW | - (window->ioid & CBE_IOPTE_IOID_Mask); -#else - base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | - CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); -#endif - if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING)) - base_pte &= ~CBE_IOPTE_SO_RW; - - io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); - - for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift)) - io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask); - - mb(); - - invalidate_tce_cache(window->iommu, io_pte, npages); - - pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n", - index, npages, direction, base_pte); - return 0; -} - -static void tce_free_cell(struct iommu_table *tbl, long index, long npages) -{ - - int i; - unsigned long *io_pte, pte; - struct iommu_window *window = - container_of(tbl, struct iommu_window, table); - - pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages); - -#ifdef CELL_IOMMU_REAL_UNMAP - pte = 0; -#else - /* spider bridge does PCI reads after freeing - insert a mapping - * to a scratch page instead of an invalid entry */ - pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW | - __pa(window->iommu->pad_page) | - (window->ioid & CBE_IOPTE_IOID_Mask); -#endif - - io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); - - for (i = 0; i < npages; i++) - io_pte[i] = pte; - - mb(); - - invalidate_tce_cache(window->iommu, io_pte, npages); -} - -static irqreturn_t ioc_interrupt(int irq, void *data) -{ - unsigned long stat, spf; - struct cbe_iommu *iommu = data; - - stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); - spf = stat & IOC_IO_ExcpStat_SPF_Mask; - - /* Might want to rate limit it */ - printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat); - printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n", - !!(stat & IOC_IO_ExcpStat_V), - (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ', - (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ', - (stat & IOC_IO_ExcpStat_RW_Mask) ? 
"Read" : "Write", - (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask)); - printk(KERN_ERR " page=0x%016lx\n", - stat & IOC_IO_ExcpStat_ADDR_Mask); - - /* clear interrupt */ - stat &= ~IOC_IO_ExcpStat_V; - out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat); - - return IRQ_HANDLED; -} - -static int __init cell_iommu_find_ioc(int nid, unsigned long *base) -{ - struct device_node *np; - struct resource r; - - *base = 0; - - /* First look for new style /be nodes */ - for_each_node_by_name(np, "ioc") { - if (of_node_to_nid(np) != nid) - continue; - if (of_address_to_resource(np, 0, &r)) { - printk(KERN_ERR "iommu: can't get address for %pOF\n", - np); - continue; - } - *base = r.start; - of_node_put(np); - return 0; - } - - /* Ok, let's try the old way */ - for_each_node_by_type(np, "cpu") { - const unsigned int *nidp; - const unsigned long *tmp; - - nidp = of_get_property(np, "node-id", NULL); - if (nidp && *nidp == nid) { - tmp = of_get_property(np, "ioc-translation", NULL); - if (tmp) { - *base = *tmp; - of_node_put(np); - return 0; - } - } - } - - return -ENODEV; -} - -static void __init cell_iommu_setup_stab(struct cbe_iommu *iommu, - unsigned long dbase, unsigned long dsize, - unsigned long fbase, unsigned long fsize) -{ - struct page *page; - unsigned long segments, stab_size; - - segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT; - - pr_debug("%s: iommu[%d]: segments: %lu\n", - __func__, iommu->nid, segments); - - /* set up the segment table */ - stab_size = segments * sizeof(unsigned long); - page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size)); - BUG_ON(!page); - iommu->stab = page_address(page); - memset(iommu->stab, 0, stab_size); -} - -static unsigned long *__init cell_iommu_alloc_ptab(struct cbe_iommu *iommu, - unsigned long base, unsigned long size, unsigned long gap_base, - unsigned long gap_size, unsigned long page_shift) -{ - struct page *page; - int i; - unsigned long reg, segments, pages_per_segment, ptab_size, - n_pte_pages, start_seg, *ptab; - - start_seg = base >> IO_SEGMENT_SHIFT; - segments = size >> IO_SEGMENT_SHIFT; - pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift); - /* PTEs for each segment must start on a 4K boundary */ - pages_per_segment = max(pages_per_segment, - (1 << 12) / sizeof(unsigned long)); - - ptab_size = segments * pages_per_segment * sizeof(unsigned long); - pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__, - iommu->nid, ptab_size, get_order(ptab_size)); - page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size)); - BUG_ON(!page); - - ptab = page_address(page); - memset(ptab, 0, ptab_size); - - /* number of 4K pages needed for a page table */ - n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12; - - pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n", - __func__, iommu->nid, iommu->stab, ptab, - n_pte_pages); - - /* initialise the STEs */ - reg = IOSTE_V | ((n_pte_pages - 1) << 5); - - switch (page_shift) { - case 12: reg |= IOSTE_PS_4K; break; - case 16: reg |= IOSTE_PS_64K; break; - case 20: reg |= IOSTE_PS_1M; break; - case 24: reg |= IOSTE_PS_16M; break; - default: BUG(); - } - - gap_base = gap_base >> IO_SEGMENT_SHIFT; - gap_size = gap_size >> IO_SEGMENT_SHIFT; - - pr_debug("Setting up IOMMU stab:\n"); - for (i = start_seg; i < (start_seg + segments); i++) { - if (i >= gap_base && i < (gap_base + gap_size)) { - pr_debug("\toverlap at %d, skipping\n", i); - continue; - } - iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) * - (i - start_seg)); - 
pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]); - } - - return ptab; -} - -static void __init cell_iommu_enable_hardware(struct cbe_iommu *iommu) -{ - int ret; - unsigned long reg, xlate_base; - unsigned int virq; - - if (cell_iommu_find_ioc(iommu->nid, &xlate_base)) - panic("%s: missing IOC register mappings for node %d\n", - __func__, iommu->nid); - - iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size); - iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset; - - /* ensure that the STEs have updated */ - mb(); - - /* setup interrupts for the iommu. */ - reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); - out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, - reg & ~IOC_IO_ExcpStat_V); - out_be64(iommu->xlate_regs + IOC_IO_ExcpMask, - IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE); - - virq = irq_create_mapping(NULL, - IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT)); - BUG_ON(!virq); - - ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu); - BUG_ON(ret); - - /* set the IOC segment table origin register (and turn on the iommu) */ - reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW; - out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg); - in_be64(iommu->xlate_regs + IOC_IOST_Origin); - - /* turn on IO translation */ - reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE; - out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg); -} - -static void __init cell_iommu_setup_hardware(struct cbe_iommu *iommu, - unsigned long base, unsigned long size) -{ - cell_iommu_setup_stab(iommu, base, size, 0, 0); - iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0, - IOMMU_PAGE_SHIFT_4K); - cell_iommu_enable_hardware(iommu); -} - -#if 0/* Unused for now */ -static struct iommu_window *find_window(struct cbe_iommu *iommu, - unsigned long offset, unsigned long size) -{ - struct iommu_window *window; - - /* todo: check for overlapping (but not equal) windows) */ - - list_for_each_entry(window, &(iommu->windows), list) { - if (window->offset == offset && window->size == size) - return window; - } - - return NULL; -} -#endif - -static inline u32 cell_iommu_get_ioid(struct device_node *np) -{ - const u32 *ioid; - - ioid = of_get_property(np, "ioid", NULL); - if (ioid == NULL) { - printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n", - np); - return 0; - } - - return *ioid; -} - -static struct iommu_table_ops cell_iommu_ops = { - .set = tce_build_cell, - .clear = tce_free_cell -}; - -static struct iommu_window * __init -cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, - unsigned long offset, unsigned long size, - unsigned long pte_offset) -{ - struct iommu_window *window; - struct page *page; - u32 ioid; - - ioid = cell_iommu_get_ioid(np); - - window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid); - BUG_ON(window == NULL); - - window->offset = offset; - window->size = size; - window->ioid = ioid; - window->iommu = iommu; - - window->table.it_blocksize = 16; - window->table.it_base = (unsigned long)iommu->ptab; - window->table.it_index = iommu->nid; - window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K; - window->table.it_offset = - (offset >> window->table.it_page_shift) + pte_offset; - window->table.it_size = size >> window->table.it_page_shift; - window->table.it_ops = &cell_iommu_ops; - - if (!iommu_init_table(&window->table, iommu->nid, 0, 0)) - panic("Failed to initialize iommu table"); - - pr_debug("\tioid %d\n", window->ioid); - pr_debug("\tblocksize %ld\n", window->table.it_blocksize); - pr_debug("\tbase 0x%016lx\n", 
window->table.it_base); - pr_debug("\toffset 0x%lx\n", window->table.it_offset); - pr_debug("\tsize %ld\n", window->table.it_size); - - list_add(&window->list, &iommu->windows); - - if (offset != 0) - return window; - - /* We need to map and reserve the first IOMMU page since it's used - * by the spider workaround. In theory, we only need to do that when - * running on spider but it doesn't really matter. - * - * This code also assumes that we have a window that starts at 0, - * which is the case on all spider based blades. - */ - page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0); - BUG_ON(!page); - iommu->pad_page = page_address(page); - clear_page(iommu->pad_page); - - __set_bit(0, window->table.it_map); - tce_build_cell(&window->table, window->table.it_offset, 1, - (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0); - - return window; -} - -static struct cbe_iommu *cell_iommu_for_node(int nid) -{ - int i; - - for (i = 0; i < cbe_nr_iommus; i++) - if (iommus[i].nid == nid) - return &iommus[i]; - return NULL; -} - -static unsigned long cell_dma_nommu_offset; - -static unsigned long dma_iommu_fixed_base; -static bool cell_iommu_enabled; - -/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */ -bool iommu_fixed_is_weak; - -static struct iommu_table *cell_get_iommu_table(struct device *dev) -{ - struct iommu_window *window; - struct cbe_iommu *iommu; - - /* Current implementation uses the first window available in that - * node's iommu. We -might- do something smarter later though it may - * never be necessary - */ - iommu = cell_iommu_for_node(dev_to_node(dev)); - if (iommu == NULL || list_empty(&iommu->windows)) { - dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n", - dev->of_node, dev_to_node(dev)); - return NULL; - } - window = list_entry(iommu->windows.next, struct iommu_window, list); - - return &window->table; -} - -static u64 cell_iommu_get_fixed_address(struct device *dev); - -static void cell_dma_dev_setup(struct device *dev) -{ - if (cell_iommu_enabled) { - u64 addr = cell_iommu_get_fixed_address(dev); - - if (addr != OF_BAD_ADDR) - dev->archdata.dma_offset = addr + dma_iommu_fixed_base; - set_iommu_table_base(dev, cell_get_iommu_table(dev)); - } else { - dev->archdata.dma_offset = cell_dma_nommu_offset; - } -} - -static void cell_pci_dma_dev_setup(struct pci_dev *dev) -{ - cell_dma_dev_setup(&dev->dev); -} - -static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action, - void *data) -{ - struct device *dev = data; - - /* We are only interested in device addition */ - if (action != BUS_NOTIFY_ADD_DEVICE) - return 0; - - if (cell_iommu_enabled) - dev->dma_ops = &dma_iommu_ops; - cell_dma_dev_setup(dev); - return 0; -} - -static struct notifier_block cell_of_bus_notifier = { - .notifier_call = cell_of_bus_notify -}; - -static int __init cell_iommu_get_window(struct device_node *np, - unsigned long *base, - unsigned long *size) -{ - const __be32 *dma_window; - unsigned long index; - - /* Use ibm,dma-window if available, else, hard code ! 
*/ - dma_window = of_get_property(np, "ibm,dma-window", NULL); - if (dma_window == NULL) { - *base = 0; - *size = 0x80000000u; - return -ENODEV; - } - - of_parse_dma_window(np, dma_window, &index, base, size); - return 0; -} - -static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np) -{ - struct cbe_iommu *iommu; - int nid, i; - - /* Get node ID */ - nid = of_node_to_nid(np); - if (nid < 0) { - printk(KERN_ERR "iommu: failed to get node for %pOF\n", - np); - return NULL; - } - pr_debug("iommu: setting up iommu for node %d (%pOF)\n", - nid, np); - - /* XXX todo: If we can have multiple windows on the same IOMMU, which - * isn't the case today, we probably want here to check whether the - * iommu for that node is already setup. - * However, there might be issue with getting the size right so let's - * ignore that for now. We might want to completely get rid of the - * multiple window support since the cell iommu supports per-page ioids - */ - - if (cbe_nr_iommus >= NR_IOMMUS) { - printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n", - np); - return NULL; - } - - /* Init base fields */ - i = cbe_nr_iommus++; - iommu = &iommus[i]; - iommu->stab = NULL; - iommu->nid = nid; - snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i); - INIT_LIST_HEAD(&iommu->windows); - - return iommu; -} - -static void __init cell_iommu_init_one(struct device_node *np, - unsigned long offset) -{ - struct cbe_iommu *iommu; - unsigned long base, size; - - iommu = cell_iommu_alloc(np); - if (!iommu) - return; - - /* Obtain a window for it */ - cell_iommu_get_window(np, &base, &size); - - pr_debug("\ttranslating window 0x%lx...0x%lx\n", - base, base + size - 1); - - /* Initialize the hardware */ - cell_iommu_setup_hardware(iommu, base, size); - - /* Setup the iommu_table */ - cell_iommu_setup_window(iommu, np, base, size, - offset >> IOMMU_PAGE_SHIFT_4K); -} - -static void __init cell_disable_iommus(void) -{ - int node; - unsigned long base, val; - void __iomem *xregs, *cregs; - - /* Make sure IOC translation is disabled on all nodes */ - for_each_online_node(node) { - if (cell_iommu_find_ioc(node, &base)) - continue; - xregs = ioremap(base, IOC_Reg_Size); - if (xregs == NULL) - continue; - cregs = xregs + IOC_IOCmd_Offset; - - pr_debug("iommu: cleaning up iommu on node %d\n", node); - - out_be64(xregs + IOC_IOST_Origin, 0); - (void)in_be64(xregs + IOC_IOST_Origin); - val = in_be64(cregs + IOC_IOCmd_Cfg); - val &= ~IOC_IOCmd_Cfg_TE; - out_be64(cregs + IOC_IOCmd_Cfg, val); - (void)in_be64(cregs + IOC_IOCmd_Cfg); - - iounmap(xregs); - } -} - -static int __init cell_iommu_init_disabled(void) -{ - struct device_node *np = NULL; - unsigned long base = 0, size; - - /* When no iommu is present, we use direct DMA ops */ - - /* First make sure all IOC translation is turned off */ - cell_disable_iommus(); - - /* If we have no Axon, we set up the spider DMA magic offset */ - if (of_find_node_by_name(NULL, "axon") == NULL) - cell_dma_nommu_offset = SPIDER_DMA_OFFSET; - - /* Now we need to check to see where the memory is mapped - * in PCI space. We assume that all busses use the same dma - * window which is always the case so far on Cell, thus we - * pick up the first pci-internal node we can find and check - * the DMA window from there. 
- */ - for_each_node_by_name(np, "axon") { - if (np->parent == NULL || np->parent->parent != NULL) - continue; - if (cell_iommu_get_window(np, &base, &size) == 0) - break; - } - if (np == NULL) { - for_each_node_by_name(np, "pci-internal") { - if (np->parent == NULL || np->parent->parent != NULL) - continue; - if (cell_iommu_get_window(np, &base, &size) == 0) - break; - } - } - of_node_put(np); - - /* If we found a DMA window, we check if it's big enough to enclose - * all of physical memory. If not, we force enable IOMMU - */ - if (np && size < memblock_end_of_DRAM()) { - printk(KERN_WARNING "iommu: force-enabled, dma window" - " (%ldMB) smaller than total memory (%lldMB)\n", - size >> 20, memblock_end_of_DRAM() >> 20); - return -ENODEV; - } - - cell_dma_nommu_offset += base; - - if (cell_dma_nommu_offset != 0) - cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup; - - printk("iommu: disabled, direct DMA offset is 0x%lx\n", - cell_dma_nommu_offset); - - return 0; -} - -/* - * Fixed IOMMU mapping support - * - * This code adds support for setting up a fixed IOMMU mapping on certain - * cell machines. For 64-bit devices this avoids the performance overhead of - * mapping and unmapping pages at runtime. 32-bit devices are unable to use - * the fixed mapping. - * - * The fixed mapping is established at boot, and maps all of physical memory - * 1:1 into device space at some offset. On machines with < 30 GB of memory - * we setup the fixed mapping immediately above the normal IOMMU window. - * - * For example a machine with 4GB of memory would end up with the normal - * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In - * this case a 64-bit device wishing to DMA to 1GB would be told to DMA to - * 3GB, plus any offset required by firmware. The firmware offset is encoded - * in the "dma-ranges" property. - * - * On machines with 30GB or more of memory, we are unable to place the fixed - * mapping above the normal IOMMU window as we would run out of address space. - * Instead we move the normal IOMMU window to coincide with the hash page - * table, this region does not need to be part of the fixed mapping as no - * device should ever be DMA'ing to it. We then setup the fixed mapping - * from 0 to 32GB. 
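To make the layout described above concrete: on a 4 GB machine the dynamic window occupies bus addresses 0-2 GB and the fixed 1:1 window starts right after it, so a 64-bit device reaching physical address X is told to use fixed-base + X (plus any firmware offset from dma-ranges). A tiny sketch of that translation; the window size and addresses below are hypothetical example values, not taken from real hardware:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical values for a 4 GB machine: dynamic IOMMU window at
 * bus 0-2 GB, fixed 1:1 window starting immediately after it. */
#define DYNAMIC_WINDOW_SIZE	0x080000000ull	/* 2 GB */
#define FIXED_BASE		DYNAMIC_WINDOW_SIZE

/* Bus address a 64-bit device should use to reach physical address pa. */
static uint64_t fixed_map_bus_addr(uint64_t pa, uint64_t firmware_offset)
{
	return FIXED_BASE + pa + firmware_offset;
}

int main(void)
{
	/* DMA to physical 1 GB ends up at bus 3 GB, as in the comment above. */
	printf("phys 0x%llx -> bus 0x%llx\n",
	       0x40000000ull,
	       (unsigned long long)fixed_map_bus_addr(0x40000000ull, 0));
	return 0;
}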
- */ - -static u64 cell_iommu_get_fixed_address(struct device *dev) -{ - u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR; - struct device_node *np; - const u32 *ranges = NULL; - int i, len, best, naddr, nsize, pna, range_size; - - /* We can be called for platform devices that have no of_node */ - np = of_node_get(dev->of_node); - if (!np) - goto out; - - while (1) { - naddr = of_n_addr_cells(np); - nsize = of_n_size_cells(np); - np = of_get_next_parent(np); - if (!np) - break; - - ranges = of_get_property(np, "dma-ranges", &len); - - /* Ignore empty ranges, they imply no translation required */ - if (ranges && len > 0) - break; - } - - if (!ranges) { - dev_dbg(dev, "iommu: no dma-ranges found\n"); - goto out; - } - - len /= sizeof(u32); - - pna = of_n_addr_cells(np); - range_size = naddr + nsize + pna; - - /* dma-ranges format: - * child addr : naddr cells - * parent addr : pna cells - * size : nsize cells - */ - for (i = 0, best = -1, best_size = 0; i < len; i += range_size) { - cpu_addr = of_translate_dma_address(np, ranges + i + naddr); - size = of_read_number(ranges + i + naddr + pna, nsize); - - if (cpu_addr == 0 && size > best_size) { - best = i; - best_size = size; - } - } - - if (best >= 0) { - dev_addr = of_read_number(ranges + best, naddr); - } else - dev_dbg(dev, "iommu: no suitable range found!\n"); - -out: - of_node_put(np); - - return dev_addr; -} - -static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask) -{ - return mask == DMA_BIT_MASK(64) && - cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR; -} - -static void __init insert_16M_pte(unsigned long addr, unsigned long *ptab, - unsigned long base_pte) -{ - unsigned long segment, offset; - - segment = addr >> IO_SEGMENT_SHIFT; - offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24)); - ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long)); - - pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n", - addr, ptab, segment, offset); - - ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask); -} - -static void __init cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu, - struct device_node *np, unsigned long dbase, unsigned long dsize, - unsigned long fbase, unsigned long fsize) -{ - unsigned long base_pte, uaddr, ioaddr, *ptab; - - ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24); - - dma_iommu_fixed_base = fbase; - - pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase); - - base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | - (cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask); - - if (iommu_fixed_is_weak) - pr_info("IOMMU: Using weak ordering for fixed mapping\n"); - else { - pr_info("IOMMU: Using strong ordering for fixed mapping\n"); - base_pte |= CBE_IOPTE_SO_RW; - } - - for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) { - /* Don't touch the dynamic region */ - ioaddr = uaddr + fbase; - if (ioaddr >= dbase && ioaddr < (dbase + dsize)) { - pr_debug("iommu: fixed/dynamic overlap, skipping\n"); - continue; - } - - insert_16M_pte(uaddr, ptab, base_pte); - } - - mb(); -} - -static int __init cell_iommu_fixed_mapping_init(void) -{ - unsigned long dbase, dsize, fbase, fsize, hbase, hend; - struct cbe_iommu *iommu; - struct device_node *np; - - /* The fixed mapping is only supported on axon machines */ - np = of_find_node_by_name(NULL, "axon"); - of_node_put(np); - - if (!np) { - pr_debug("iommu: fixed mapping disabled, no axons found\n"); - return -1; - } - - /* We must have dma-ranges properties for fixed mapping to work */ - np = 
of_find_node_with_property(NULL, "dma-ranges"); - of_node_put(np); - - if (!np) { - pr_debug("iommu: no dma-ranges found, no fixed mapping\n"); - return -1; - } - - /* The default setup is to have the fixed mapping sit after the - * dynamic region, so find the top of the largest IOMMU window - * on any axon, then add the size of RAM and that's our max value. - * If that is > 32GB we have to do other shennanigans. - */ - fbase = 0; - for_each_node_by_name(np, "axon") { - cell_iommu_get_window(np, &dbase, &dsize); - fbase = max(fbase, dbase + dsize); - } - - fbase = ALIGN(fbase, 1 << IO_SEGMENT_SHIFT); - fsize = memblock_phys_mem_size(); - - if ((fbase + fsize) <= 0x800000000ul) - hbase = 0; /* use the device tree window */ - else { - /* If we're over 32 GB we need to cheat. We can't map all of - * RAM with the fixed mapping, and also fit the dynamic - * region. So try to place the dynamic region where the hash - * table sits, drivers never need to DMA to it, we don't - * need a fixed mapping for that area. - */ - if (!htab_address) { - pr_debug("iommu: htab is NULL, on LPAR? Huh?\n"); - return -1; - } - hbase = __pa(htab_address); - hend = hbase + htab_size_bytes; - - /* The window must start and end on a segment boundary */ - if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) || - (hend != ALIGN(hend, 1 << IO_SEGMENT_SHIFT))) { - pr_debug("iommu: hash window not segment aligned\n"); - return -1; - } - - /* Check the hash window fits inside the real DMA window */ - for_each_node_by_name(np, "axon") { - cell_iommu_get_window(np, &dbase, &dsize); - - if (hbase < dbase || (hend > (dbase + dsize))) { - pr_debug("iommu: hash window doesn't fit in" - "real DMA window\n"); - of_node_put(np); - return -1; - } - } - - fbase = 0; - } - - /* Setup the dynamic regions */ - for_each_node_by_name(np, "axon") { - iommu = cell_iommu_alloc(np); - BUG_ON(!iommu); - - if (hbase == 0) - cell_iommu_get_window(np, &dbase, &dsize); - else { - dbase = hbase; - dsize = htab_size_bytes; - } - - printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx " - "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase, - dbase + dsize, fbase, fbase + fsize); - - cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize); - iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0, - IOMMU_PAGE_SHIFT_4K); - cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, - fbase, fsize); - cell_iommu_enable_hardware(iommu); - cell_iommu_setup_window(iommu, np, dbase, dsize, 0); - } - - cell_pci_controller_ops.iommu_bypass_supported = - cell_pci_iommu_bypass_supported; - return 0; -} - -static int iommu_fixed_disabled; - -static int __init setup_iommu_fixed(char *str) -{ - struct device_node *pciep; - - if (strcmp(str, "off") == 0) - iommu_fixed_disabled = 1; - - /* If we can find a pcie-endpoint in the device tree assume that - * we're on a triblade or a CAB so by default the fixed mapping - * should be set to be weakly ordered; but only if the boot - * option WASN'T set for strong ordering - */ - pciep = of_find_node_by_type(NULL, "pcie-endpoint"); - - if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) - iommu_fixed_is_weak = true; - - of_node_put(pciep); - - return 1; -} -__setup("iommu_fixed=", setup_iommu_fixed); - -static int __init cell_iommu_init(void) -{ - struct device_node *np; - - /* If IOMMU is disabled or we have little enough RAM to not need - * to enable it, we setup a direct mapping. - * - * Note: should we make sure we have the IOMMU actually disabled ? 
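As a side note on cell_iommu_get_fixed_address() above: once a non-empty dma-ranges property is found, it is treated as an array of (child address, parent address, size) triplets, and the largest entry whose translated CPU address is 0 wins. A simplified sketch of that selection over a flat cell array; the cell counts and the example ranges are hard-wired here purely for illustration, and the real code translates parent addresses through the device tree rather than reading them verbatim:

#include <stdio.h>
#include <stdint.h>

/* Illustrative dma-ranges with naddr = 2, pna = 2, nsize = 2 cells:
 *   child addr        parent (CPU) addr  size
 *   0x0 0x80000000    0x0 0x80000000     0x0 0x80000000   (CPU 2 GB, 2 GB long)
 *   0x8 0x00000000    0x0 0x00000000     0x1 0x00000000   (CPU 0,    4 GB long)
 */
static const uint32_t ranges[] = {
	0x0, 0x80000000, 0x0, 0x80000000, 0x0, 0x80000000,
	0x8, 0x00000000, 0x0, 0x00000000, 0x1, 0x00000000,
};

static uint64_t read_cells(const uint32_t *p)	/* 2 cells -> 64-bit value */
{
	return ((uint64_t)p[0] << 32) | p[1];
}

int main(void)
{
	const int naddr = 2, pna = 2, nsize = 2;
	const int stride = naddr + pna + nsize;
	uint64_t best_size = 0, dev_addr = ~0ull;
	size_t i;

	for (i = 0; i + stride <= sizeof(ranges) / sizeof(ranges[0]); i += stride) {
		uint64_t cpu_addr = read_cells(&ranges[i + naddr]);
		uint64_t size     = read_cells(&ranges[i + naddr + pna]);

		/* keep the biggest range that starts at CPU address 0 */
		if (cpu_addr == 0 && size > best_size) {
			best_size = size;
			dev_addr  = read_cells(&ranges[i]);
		}
	}
	printf("fixed-mapping device address: 0x%llx\n",
	       (unsigned long long)dev_addr);	/* 0x800000000 here */
	return 0;
}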
- */ - if (iommu_is_off || - (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull)) - if (cell_iommu_init_disabled() == 0) - goto bail; - - /* Setup various callbacks */ - cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup; - - if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0) - goto done; - - /* Create an iommu for each /axon node. */ - for_each_node_by_name(np, "axon") { - if (np->parent == NULL || np->parent->parent != NULL) - continue; - cell_iommu_init_one(np, 0); - } - - /* Create an iommu for each toplevel /pci-internal node for - * old hardware/firmware - */ - for_each_node_by_name(np, "pci-internal") { - if (np->parent == NULL || np->parent->parent != NULL) - continue; - cell_iommu_init_one(np, SPIDER_DMA_OFFSET); - } - done: - /* Setup default PCI iommu ops */ - set_pci_dma_ops(&dma_iommu_ops); - cell_iommu_enabled = true; - bail: - /* Register callbacks on OF platform device addition/removal - * to handle linking them to the right DMA operations - */ - bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier); - - return 0; -} -machine_arch_initcall(cell, cell_iommu_init); diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c deleted file mode 100644 index 58d967ee38b3..000000000000 --- a/arch/powerpc/platforms/cell/pervasive.c +++ /dev/null @@ -1,125 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * CBE Pervasive Monitor and Debug - * - * (C) Copyright IBM Corporation 2005 - * - * Authors: Maximino Aguilar (maguilar@us.ibm.com) - * Michael N. Day (mnday@us.ibm.com) - */ - -#undef DEBUG - -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/percpu.h> -#include <linux/types.h> -#include <linux/kallsyms.h> -#include <linux/pgtable.h> - -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/reg.h> -#include <asm/cell-regs.h> -#include <asm/cpu_has_feature.h> - -#include "pervasive.h" -#include "ras.h" - -static void cbe_power_save(void) -{ - unsigned long ctrl, thread_switch_control; - - /* Ensure our interrupt state is properly tracked */ - if (!prep_irq_for_idle()) - return; - - ctrl = mfspr(SPRN_CTRLF); - - /* Enable DEC and EE interrupt request */ - thread_switch_control = mfspr(SPRN_TSC_CELL); - thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST; - - switch (ctrl & CTRL_CT) { - case CTRL_CT0: - thread_switch_control |= TSC_CELL_DEC_ENABLE_0; - break; - case CTRL_CT1: - thread_switch_control |= TSC_CELL_DEC_ENABLE_1; - break; - default: - printk(KERN_WARNING "%s: unknown configuration\n", - __func__); - break; - } - mtspr(SPRN_TSC_CELL, thread_switch_control); - - /* - * go into low thread priority, medium priority will be - * restored for us after wake-up. - */ - HMT_low(); - - /* - * atomically disable thread execution and runlatch. - * External and Decrementer exceptions are still handled when the - * thread is disabled but now enter in cbe_system_reset_exception() - */ - ctrl &= ~(CTRL_RUNLATCH | CTRL_TE); - mtspr(SPRN_CTRLT, ctrl); - - /* Re-enable interrupts in MSR */ - __hard_irq_enable(); -} - -static int cbe_system_reset_exception(struct pt_regs *regs) -{ - switch (regs->msr & SRR1_WAKEMASK) { - case SRR1_WAKEDEC: - set_dec(1); - break; - case SRR1_WAKEEE: - /* - * Handle these when interrupts get re-enabled and we take - * them as regular exceptions. We are in an NMI context - * and can't handle these here. 
- */ - break; - case SRR1_WAKEMT: - return cbe_sysreset_hack(); -#ifdef CONFIG_CBE_RAS - case SRR1_WAKESYSERR: - cbe_system_error_exception(regs); - break; - case SRR1_WAKETHERM: - cbe_thermal_exception(regs); - break; -#endif /* CONFIG_CBE_RAS */ - default: - /* do system reset */ - return 0; - } - /* everything handled */ - return 1; -} - -void __init cbe_pervasive_init(void) -{ - int cpu; - - if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO)) - return; - - for_each_possible_cpu(cpu) { - struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu); - if (!regs) - continue; - - /* Enable Pause(0) control bit */ - out_be64(®s->pmcr, in_be64(®s->pmcr) | - CBE_PMD_PAUSE_ZERO_CONTROL); - } - - ppc_md.power_save = cbe_power_save; - ppc_md.system_reset_exception = cbe_system_reset_exception; -} diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h deleted file mode 100644 index 0da74ab10716..000000000000 --- a/arch/powerpc/platforms/cell/pervasive.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Cell Pervasive Monitor and Debug interface and HW structures - * - * (C) Copyright IBM Corporation 2005 - * - * Authors: Maximino Aguilar (maguilar@us.ibm.com) - * David J. Erb (djerb@us.ibm.com) - */ - - -#ifndef PERVASIVE_H -#define PERVASIVE_H - -extern void cbe_pervasive_init(void); - -#ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON -extern int cbe_sysreset_hack(void); -#else -static inline int cbe_sysreset_hack(void) -{ - return 1; -} -#endif /* CONFIG_PPC_IBM_CELL_RESETBUTTON */ - -#endif diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c deleted file mode 100644 index b207a7f99be5..000000000000 --- a/arch/powerpc/platforms/cell/pmu.c +++ /dev/null @@ -1,412 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cell Broadband Engine Performance Monitor - * - * (C) Copyright IBM Corporation 2001,2006 - * - * Author: - * David Erb (djerb@us.ibm.com) - * Kevin Corry (kevcorry@us.ibm.com) - */ - -#include <linux/interrupt.h> -#include <linux/irqdomain.h> -#include <linux/types.h> -#include <linux/export.h> -#include <asm/io.h> -#include <asm/irq_regs.h> -#include <asm/machdep.h> -#include <asm/pmc.h> -#include <asm/reg.h> -#include <asm/spu.h> -#include <asm/cell-regs.h> - -#include "interrupt.h" - -/* - * When writing to write-only mmio addresses, save a shadow copy. All of the - * registers are 32-bit, but stored in the upper-half of a 64-bit field in - * pmd_regs. - */ - -#define WRITE_WO_MMIO(reg, x) \ - do { \ - u32 _x = (x); \ - struct cbe_pmd_regs __iomem *pmd_regs; \ - struct cbe_pmd_shadow_regs *shadow_regs; \ - pmd_regs = cbe_get_cpu_pmd_regs(cpu); \ - shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \ - out_be64(&(pmd_regs->reg), (((u64)_x) << 32)); \ - shadow_regs->reg = _x; \ - } while (0) - -#define READ_SHADOW_REG(val, reg) \ - do { \ - struct cbe_pmd_shadow_regs *shadow_regs; \ - shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \ - (val) = shadow_regs->reg; \ - } while (0) - -#define READ_MMIO_UPPER32(val, reg) \ - do { \ - struct cbe_pmd_regs __iomem *pmd_regs; \ - pmd_regs = cbe_get_cpu_pmd_regs(cpu); \ - (val) = (u32)(in_be64(&pmd_regs->reg) >> 32); \ - } while (0) - -/* - * Physical counter registers. - * Each physical counter can act as one 32-bit counter or two 16-bit counters. 
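For illustration of the logical-to-physical counter mapping used by cbe_read_ctr() and cbe_write_ctr() below: logical counters fold onto physical ones modulo the number of physical counters, and when a physical counter is split into 16-bit halves the low-numbered logical counter uses the upper half while its high-numbered partner uses the lower half. A self-contained sketch, assuming four physical counters:

#include <stdio.h>
#include <stdint.h>

#define NR_PHYS_CTRS 4	/* assumption for this sketch */

/* Extract logical counter "ctr" from a 32-bit physical counter value,
 * mirroring the 16-bit case in cbe_read_ctr() below. */
static uint32_t read_logical_ctr(uint32_t ctr, uint32_t phys_val)
{
	return (ctr < NR_PHYS_CTRS) ? (phys_val >> 16) : (phys_val & 0xffff);
}

int main(void)
{
	uint32_t phys_val = 0xAAAA5555;	/* upper half 0xAAAA, lower half 0x5555 */

	/* logical 1 and logical 5 share physical counter 1 (1 & 3 == 5 & 3) */
	printf("logical 1 -> 0x%04x\n", read_logical_ctr(1, phys_val)); /* 0xaaaa */
	printf("logical 5 -> 0x%04x\n", read_logical_ctr(5, phys_val)); /* 0x5555 */
	return 0;
}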
- */ - -u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr) -{ - u32 val_in_latch, val = 0; - - if (phys_ctr < NR_PHYS_CTRS) { - READ_SHADOW_REG(val_in_latch, counter_value_in_latch); - - /* Read the latch or the actual counter, whichever is newer. */ - if (val_in_latch & (1 << phys_ctr)) { - READ_SHADOW_REG(val, pm_ctr[phys_ctr]); - } else { - READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]); - } - } - - return val; -} -EXPORT_SYMBOL_GPL(cbe_read_phys_ctr); - -void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val) -{ - struct cbe_pmd_shadow_regs *shadow_regs; - u32 pm_ctrl; - - if (phys_ctr < NR_PHYS_CTRS) { - /* Writing to a counter only writes to a hardware latch. - * The new value is not propagated to the actual counter - * until the performance monitor is enabled. - */ - WRITE_WO_MMIO(pm_ctr[phys_ctr], val); - - pm_ctrl = cbe_read_pm(cpu, pm_control); - if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) { - /* The counters are already active, so we need to - * rewrite the pm_control register to "re-enable" - * the PMU. - */ - cbe_write_pm(cpu, pm_control, pm_ctrl); - } else { - shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); - shadow_regs->counter_value_in_latch |= (1 << phys_ctr); - } - } -} -EXPORT_SYMBOL_GPL(cbe_write_phys_ctr); - -/* - * "Logical" counter registers. - * These will read/write 16-bits or 32-bits depending on the - * current size of the counter. Counters 4 - 7 are always 16-bit. - */ - -u32 cbe_read_ctr(u32 cpu, u32 ctr) -{ - u32 val; - u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1); - - val = cbe_read_phys_ctr(cpu, phys_ctr); - - if (cbe_get_ctr_size(cpu, phys_ctr) == 16) - val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff); - - return val; -} -EXPORT_SYMBOL_GPL(cbe_read_ctr); - -void cbe_write_ctr(u32 cpu, u32 ctr, u32 val) -{ - u32 phys_ctr; - u32 phys_val; - - phys_ctr = ctr & (NR_PHYS_CTRS - 1); - - if (cbe_get_ctr_size(cpu, phys_ctr) == 16) { - phys_val = cbe_read_phys_ctr(cpu, phys_ctr); - - if (ctr < NR_PHYS_CTRS) - val = (val << 16) | (phys_val & 0xffff); - else - val = (val & 0xffff) | (phys_val & 0xffff0000); - } - - cbe_write_phys_ctr(cpu, phys_ctr, val); -} -EXPORT_SYMBOL_GPL(cbe_write_ctr); - -/* - * Counter-control registers. - * Each "logical" counter has a corresponding control register. - */ - -u32 cbe_read_pm07_control(u32 cpu, u32 ctr) -{ - u32 pm07_control = 0; - - if (ctr < NR_CTRS) - READ_SHADOW_REG(pm07_control, pm07_control[ctr]); - - return pm07_control; -} -EXPORT_SYMBOL_GPL(cbe_read_pm07_control); - -void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val) -{ - if (ctr < NR_CTRS) - WRITE_WO_MMIO(pm07_control[ctr], val); -} -EXPORT_SYMBOL_GPL(cbe_write_pm07_control); - -/* - * Other PMU control registers. Most of these are write-only. 
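The WRITE_WO_MMIO/READ_SHADOW_REG pattern below is the usual answer to write-only hardware registers: every write is mirrored into a software shadow, and reads are served from that shadow. A generic sketch of the idea with a hypothetical device, using plain C rather than the kernel MMIO accessors:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical device with one write-only 32-bit control register. */
struct dev_state {
	volatile uint32_t *wo_reg;	/* write-only MMIO register */
	uint32_t shadow;		/* last value written, readable copy */
};

static void dev_write_control(struct dev_state *d, uint32_t val)
{
	*d->wo_reg = val;	/* hardware only accepts writes here... */
	d->shadow = val;	/* ...so remember what we wrote */
}

static uint32_t dev_read_control(const struct dev_state *d)
{
	return d->shadow;	/* reads come from the shadow, never the device */
}

int main(void)
{
	uint32_t fake_mmio;	/* stands in for the real register in this sketch */
	struct dev_state d = { .wo_reg = &fake_mmio, .shadow = 0 };

	dev_write_control(&d, 0x1234);
	printf("control readback: 0x%x\n", dev_read_control(&d));
	return 0;
}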
- */ - -u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg) -{ - u32 val = 0; - - switch (reg) { - case group_control: - READ_SHADOW_REG(val, group_control); - break; - - case debug_bus_control: - READ_SHADOW_REG(val, debug_bus_control); - break; - - case trace_address: - READ_MMIO_UPPER32(val, trace_address); - break; - - case ext_tr_timer: - READ_SHADOW_REG(val, ext_tr_timer); - break; - - case pm_status: - READ_MMIO_UPPER32(val, pm_status); - break; - - case pm_control: - READ_SHADOW_REG(val, pm_control); - break; - - case pm_interval: - READ_MMIO_UPPER32(val, pm_interval); - break; - - case pm_start_stop: - READ_SHADOW_REG(val, pm_start_stop); - break; - } - - return val; -} -EXPORT_SYMBOL_GPL(cbe_read_pm); - -void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val) -{ - switch (reg) { - case group_control: - WRITE_WO_MMIO(group_control, val); - break; - - case debug_bus_control: - WRITE_WO_MMIO(debug_bus_control, val); - break; - - case trace_address: - WRITE_WO_MMIO(trace_address, val); - break; - - case ext_tr_timer: - WRITE_WO_MMIO(ext_tr_timer, val); - break; - - case pm_status: - WRITE_WO_MMIO(pm_status, val); - break; - - case pm_control: - WRITE_WO_MMIO(pm_control, val); - break; - - case pm_interval: - WRITE_WO_MMIO(pm_interval, val); - break; - - case pm_start_stop: - WRITE_WO_MMIO(pm_start_stop, val); - break; - } -} -EXPORT_SYMBOL_GPL(cbe_write_pm); - -/* - * Get/set the size of a physical counter to either 16 or 32 bits. - */ - -u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr) -{ - u32 pm_ctrl, size = 0; - - if (phys_ctr < NR_PHYS_CTRS) { - pm_ctrl = cbe_read_pm(cpu, pm_control); - size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32; - } - - return size; -} -EXPORT_SYMBOL_GPL(cbe_get_ctr_size); - -void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size) -{ - u32 pm_ctrl; - - if (phys_ctr < NR_PHYS_CTRS) { - pm_ctrl = cbe_read_pm(cpu, pm_control); - switch (ctr_size) { - case 16: - pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr); - break; - - case 32: - pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr); - break; - } - cbe_write_pm(cpu, pm_control, pm_ctrl); - } -} -EXPORT_SYMBOL_GPL(cbe_set_ctr_size); - -/* - * Enable/disable the entire performance monitoring unit. - * When we enable the PMU, all pending writes to counters get committed. - */ - -void cbe_enable_pm(u32 cpu) -{ - struct cbe_pmd_shadow_regs *shadow_regs; - u32 pm_ctrl; - - shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); - shadow_regs->counter_value_in_latch = 0; - - pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON; - cbe_write_pm(cpu, pm_control, pm_ctrl); -} -EXPORT_SYMBOL_GPL(cbe_enable_pm); - -void cbe_disable_pm(u32 cpu) -{ - u32 pm_ctrl; - pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON; - cbe_write_pm(cpu, pm_control, pm_ctrl); -} -EXPORT_SYMBOL_GPL(cbe_disable_pm); - -/* - * Reading from the trace_buffer. - * The trace buffer is two 64-bit registers. Reading from - * the second half automatically increments the trace_address. - */ - -void cbe_read_trace_buffer(u32 cpu, u64 *buf) -{ - struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu); - - *buf++ = in_be64(&pmd_regs->trace_buffer_0_63); - *buf++ = in_be64(&pmd_regs->trace_buffer_64_127); -} -EXPORT_SYMBOL_GPL(cbe_read_trace_buffer); - -/* - * Enabling/disabling interrupts for the entire performance monitoring unit. - */ - -u32 cbe_get_and_clear_pm_interrupts(u32 cpu) -{ - /* Reading pm_status clears the interrupt bits. 
*/ - return cbe_read_pm(cpu, pm_status); -} -EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts); - -void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask) -{ - /* Set which node and thread will handle the next interrupt. */ - iic_set_interrupt_routing(cpu, thread, 0); - - /* Enable the interrupt bits in the pm_status register. */ - if (mask) - cbe_write_pm(cpu, pm_status, mask); -} -EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts); - -void cbe_disable_pm_interrupts(u32 cpu) -{ - cbe_get_and_clear_pm_interrupts(cpu); - cbe_write_pm(cpu, pm_status, 0); -} -EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts); - -static irqreturn_t cbe_pm_irq(int irq, void *dev_id) -{ - perf_irq(get_irq_regs()); - return IRQ_HANDLED; -} - -static int __init cbe_init_pm_irq(void) -{ - unsigned int irq; - int rc, node; - - for_each_online_node(node) { - irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI | - (node << IIC_IRQ_NODE_SHIFT)); - if (!irq) { - printk("ERROR: Unable to allocate irq for node %d\n", - node); - return -EINVAL; - } - - rc = request_irq(irq, cbe_pm_irq, - 0, "cbe-pmu-0", NULL); - if (rc) { - printk("ERROR: Request for irq on node %d failed\n", - node); - return rc; - } - } - - return 0; -} -machine_arch_initcall(cell, cbe_init_pm_irq); - -void cbe_sync_irq(int node) -{ - unsigned int irq; - - irq = irq_find_mapping(NULL, - IIC_IRQ_IOEX_PMI - | (node << IIC_IRQ_NODE_SHIFT)); - - if (!irq) { - printk(KERN_WARNING "ERROR, unable to get existing irq %d " \ - "for node %d\n", irq, node); - return; - } - - synchronize_irq(irq); -} -EXPORT_SYMBOL_GPL(cbe_sync_irq); - diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c deleted file mode 100644 index 8d934ea6270c..000000000000 --- a/arch/powerpc/platforms/cell/ras.c +++ /dev/null @@ -1,352 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2006-2008, IBM Corporation. 
- */ - -#undef DEBUG - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/smp.h> -#include <linux/reboot.h> -#include <linux/kexec.h> -#include <linux/crash_dump.h> -#include <linux/of.h> - -#include <asm/kexec.h> -#include <asm/reg.h> -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/rtas.h> -#include <asm/cell-regs.h> - -#include "ras.h" - - -static void dump_fir(int cpu) -{ - struct cbe_pmd_regs __iomem *pregs = cbe_get_cpu_pmd_regs(cpu); - struct cbe_iic_regs __iomem *iregs = cbe_get_cpu_iic_regs(cpu); - - if (pregs == NULL) - return; - - /* Todo: do some nicer parsing of bits and based on them go down - * to other sub-units FIRs and not only IIC - */ - printk(KERN_ERR "Global Checkstop FIR : 0x%016llx\n", - in_be64(&pregs->checkstop_fir)); - printk(KERN_ERR "Global Recoverable FIR : 0x%016llx\n", - in_be64(&pregs->checkstop_fir)); - printk(KERN_ERR "Global MachineCheck FIR : 0x%016llx\n", - in_be64(&pregs->spec_att_mchk_fir)); - - if (iregs == NULL) - return; - printk(KERN_ERR "IOC FIR : 0x%016llx\n", - in_be64(&iregs->ioc_fir)); - -} - -DEFINE_INTERRUPT_HANDLER(cbe_system_error_exception) -{ - int cpu = smp_processor_id(); - - printk(KERN_ERR "System Error Interrupt on CPU %d !\n", cpu); - dump_fir(cpu); - dump_stack(); -} - -DEFINE_INTERRUPT_HANDLER(cbe_maintenance_exception) -{ - int cpu = smp_processor_id(); - - /* - * Nothing implemented for the maintenance interrupt at this point - */ - - printk(KERN_ERR "Unhandled Maintenance interrupt on CPU %d !\n", cpu); - dump_stack(); -} - -DEFINE_INTERRUPT_HANDLER(cbe_thermal_exception) -{ - int cpu = smp_processor_id(); - - /* - * Nothing implemented for the thermal interrupt at this point - */ - - printk(KERN_ERR "Unhandled Thermal interrupt on CPU %d !\n", cpu); - dump_stack(); -} - -static int cbe_machine_check_handler(struct pt_regs *regs) -{ - int cpu = smp_processor_id(); - - printk(KERN_ERR "Machine Check Interrupt on CPU %d !\n", cpu); - dump_fir(cpu); - - /* No recovery from this code now, lets continue */ - return 0; -} - -struct ptcal_area { - struct list_head list; - int nid; - int order; - struct page *pages; -}; - -static LIST_HEAD(ptcal_list); - -static int ptcal_start_tok, ptcal_stop_tok; - -static int __init cbe_ptcal_enable_on_node(int nid, int order) -{ - struct ptcal_area *area; - int ret = -ENOMEM; - unsigned long addr; - - if (is_kdump_kernel()) - rtas_call(ptcal_stop_tok, 1, 1, NULL, nid); - - area = kmalloc(sizeof(*area), GFP_KERNEL); - if (!area) - goto out_err; - - area->nid = nid; - area->order = order; - area->pages = __alloc_pages_node(area->nid, - GFP_KERNEL|__GFP_THISNODE, - area->order); - - if (!area->pages) { - printk(KERN_WARNING "%s: no page on node %d\n", - __func__, area->nid); - goto out_free_area; - } - - /* - * We move the ptcal area to the middle of the allocated - * page, in order to avoid prefetches in memcpy and similar - * functions stepping on it. 
- */ - addr = __pa(page_address(area->pages)) + (PAGE_SIZE >> 1); - printk(KERN_DEBUG "%s: enabling PTCAL on node %d address=0x%016lx\n", - __func__, area->nid, addr); - - ret = -EIO; - if (rtas_call(ptcal_start_tok, 3, 1, NULL, area->nid, - (unsigned int)(addr >> 32), - (unsigned int)(addr & 0xffffffff))) { - printk(KERN_ERR "%s: error enabling PTCAL on node %d!\n", - __func__, nid); - goto out_free_pages; - } - - list_add(&area->list, &ptcal_list); - - return 0; - -out_free_pages: - __free_pages(area->pages, area->order); -out_free_area: - kfree(area); -out_err: - return ret; -} - -static int __init cbe_ptcal_enable(void) -{ - const u32 *size; - struct device_node *np; - int order, found_mic = 0; - - np = of_find_node_by_path("/rtas"); - if (!np) - return -ENODEV; - - size = of_get_property(np, "ibm,cbe-ptcal-size", NULL); - if (!size) { - of_node_put(np); - return -ENODEV; - } - - pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size); - order = get_order(*size); - of_node_put(np); - - /* support for malta device trees, with be@/mic@ nodes */ - for_each_node_by_type(np, "mic-tm") { - cbe_ptcal_enable_on_node(of_node_to_nid(np), order); - found_mic = 1; - } - - if (found_mic) - return 0; - - /* support for older device tree - use cpu nodes */ - for_each_node_by_type(np, "cpu") { - const u32 *nid = of_get_property(np, "node-id", NULL); - if (!nid) { - printk(KERN_ERR "%s: node %pOF is missing node-id?\n", - __func__, np); - continue; - } - cbe_ptcal_enable_on_node(*nid, order); - found_mic = 1; - } - - return found_mic ? 0 : -ENODEV; -} - -static int cbe_ptcal_disable(void) -{ - struct ptcal_area *area, *tmp; - int ret = 0; - - pr_debug("%s: disabling PTCAL\n", __func__); - - list_for_each_entry_safe(area, tmp, &ptcal_list, list) { - /* disable ptcal on this node */ - if (rtas_call(ptcal_stop_tok, 1, 1, NULL, area->nid)) { - printk(KERN_ERR "%s: error disabling PTCAL " - "on node %d!\n", __func__, - area->nid); - ret = -EIO; - continue; - } - - /* ensure we can access the PTCAL area */ - memset(page_address(area->pages), 0, - 1 << (area->order + PAGE_SHIFT)); - - /* clean up */ - list_del(&area->list); - __free_pages(area->pages, area->order); - kfree(area); - } - - return ret; -} - -static int cbe_ptcal_notify_reboot(struct notifier_block *nb, - unsigned long code, void *data) -{ - return cbe_ptcal_disable(); -} - -static void cbe_ptcal_crash_shutdown(void) -{ - cbe_ptcal_disable(); -} - -static struct notifier_block cbe_ptcal_reboot_notifier = { - .notifier_call = cbe_ptcal_notify_reboot -}; - -#ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON -static int sysreset_hack; - -static int __init cbe_sysreset_init(void) -{ - struct cbe_pmd_regs __iomem *regs; - - sysreset_hack = of_machine_is_compatible("IBM,CBPLUS-1.0"); - if (!sysreset_hack) - return 0; - - regs = cbe_get_cpu_pmd_regs(0); - if (!regs) - return 0; - - /* Enable JTAG system-reset hack */ - out_be32(®s->fir_mode_reg, - in_be32(®s->fir_mode_reg) | - CBE_PMD_FIR_MODE_M8); - - return 0; -} -device_initcall(cbe_sysreset_init); - -int cbe_sysreset_hack(void) -{ - struct cbe_pmd_regs __iomem *regs; - - /* - * The BMC can inject user triggered system reset exceptions, - * but cannot set the system reset reason in srr1, - * so check an extra register here. 
- */ - if (sysreset_hack && (smp_processor_id() == 0)) { - regs = cbe_get_cpu_pmd_regs(0); - if (!regs) - return 0; - if (in_be64(®s->ras_esc_0) & 0x0000ffff) { - out_be64(®s->ras_esc_0, 0); - return 0; - } - } - return 1; -} -#endif /* CONFIG_PPC_IBM_CELL_RESETBUTTON */ - -static int __init cbe_ptcal_init(void) -{ - int ret; - ptcal_start_tok = rtas_token("ibm,cbe-start-ptcal"); - ptcal_stop_tok = rtas_token("ibm,cbe-stop-ptcal"); - - if (ptcal_start_tok == RTAS_UNKNOWN_SERVICE - || ptcal_stop_tok == RTAS_UNKNOWN_SERVICE) - return -ENODEV; - - ret = register_reboot_notifier(&cbe_ptcal_reboot_notifier); - if (ret) - goto out1; - - ret = crash_shutdown_register(&cbe_ptcal_crash_shutdown); - if (ret) - goto out2; - - return cbe_ptcal_enable(); - -out2: - unregister_reboot_notifier(&cbe_ptcal_reboot_notifier); -out1: - printk(KERN_ERR "Can't disable PTCAL, so not enabling\n"); - return ret; -} - -arch_initcall(cbe_ptcal_init); - -void __init cbe_ras_init(void) -{ - unsigned long hid0; - - /* - * Enable System Error & thermal interrupts and wakeup conditions - */ - - hid0 = mfspr(SPRN_HID0); - hid0 |= HID0_CBE_THERM_INT_EN | HID0_CBE_THERM_WAKEUP | - HID0_CBE_SYSERR_INT_EN | HID0_CBE_SYSERR_WAKEUP; - mtspr(SPRN_HID0, hid0); - mb(); - - /* - * Install machine check handler. Leave setting of precise mode to - * what the firmware did for now - */ - ppc_md.machine_check_exception = cbe_machine_check_handler; - mb(); - - /* - * For now, we assume that IOC_FIR is already set to forward some - * error conditions to the System Error handler. If that is not true - * then it will have to be fixed up here. - */ -} diff --git a/arch/powerpc/platforms/cell/ras.h b/arch/powerpc/platforms/cell/ras.h deleted file mode 100644 index 226dbd48efad..000000000000 --- a/arch/powerpc/platforms/cell/ras.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef RAS_H -#define RAS_H - -#include <asm/interrupt.h> - -DECLARE_INTERRUPT_HANDLER(cbe_system_error_exception); -DECLARE_INTERRUPT_HANDLER(cbe_maintenance_exception); -DECLARE_INTERRUPT_HANDLER(cbe_thermal_exception); - -extern void cbe_ras_init(void); - -#endif /* RAS_H */ diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c deleted file mode 100644 index 52de014983c9..000000000000 --- a/arch/powerpc/platforms/cell/setup.c +++ /dev/null @@ -1,272 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * linux/arch/powerpc/platforms/cell/cell_setup.c - * - * Copyright (C) 1995 Linus Torvalds - * Adapted from 'alpha' version by Gary Thomas - * Modified by Cort Dougan (cort@cs.nmt.edu) - * Modified by PPC64 Team, IBM Corp - * Modified by Cell Team, IBM Deutschland Entwicklung GmbH - */ -#undef DEBUG - -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include <linux/export.h> -#include <linux/unistd.h> -#include <linux/user.h> -#include <linux/reboot.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/irq.h> -#include <linux/seq_file.h> -#include <linux/root_dev.h> -#include <linux/console.h> -#include <linux/mutex.h> -#include <linux/memory_hotplug.h> -#include <linux/of_platform.h> - -#include <asm/mmu.h> -#include <asm/processor.h> -#include <asm/io.h> -#include <asm/rtas.h> -#include <asm/pci-bridge.h> -#include <asm/iommu.h> -#include <asm/dma.h> -#include <asm/machdep.h> -#include <asm/time.h> -#include <asm/nvram.h> -#include <asm/cputable.h> -#include <asm/ppc-pci.h> -#include <asm/irq.h> -#include <asm/spu.h> -#include 
<asm/spu_priv1.h> -#include <asm/udbg.h> -#include <asm/mpic.h> -#include <asm/cell-regs.h> -#include <asm/io-workarounds.h> - -#include "cell.h" -#include "interrupt.h" -#include "pervasive.h" -#include "ras.h" - -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) -#endif - -static void cell_show_cpuinfo(struct seq_file *m) -{ - struct device_node *root; - const char *model = ""; - - root = of_find_node_by_path("/"); - if (root) - model = of_get_property(root, "model", NULL); - seq_printf(m, "machine\t\t: CHRP %s\n", model); - of_node_put(root); -} - -static void cell_progress(char *s, unsigned short hex) -{ - printk("*** %04x : %s\n", hex, s ? s : ""); -} - -static void cell_fixup_pcie_rootcomplex(struct pci_dev *dev) -{ - struct pci_controller *hose; - const char *s; - int i; - - if (!machine_is(cell)) - return; - - /* We're searching for a direct child of the PHB */ - if (dev->bus->self != NULL || dev->devfn != 0) - return; - - hose = pci_bus_to_host(dev->bus); - if (hose == NULL) - return; - - /* Only on PCIE */ - if (!of_device_is_compatible(hose->dn, "pciex")) - return; - - /* And only on axon */ - s = of_get_property(hose->dn, "model", NULL); - if (!s || strcmp(s, "Axon") != 0) - return; - - for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { - dev->resource[i].start = dev->resource[i].end = 0; - dev->resource[i].flags = 0; - } - - printk(KERN_DEBUG "PCI: Hiding resources on Axon PCIE RC %s\n", - pci_name(dev)); -} -DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, cell_fixup_pcie_rootcomplex); - -static int cell_setup_phb(struct pci_controller *phb) -{ - const char *model; - struct device_node *np; - - int rc = rtas_setup_phb(phb); - if (rc) - return rc; - - phb->controller_ops = cell_pci_controller_ops; - - np = phb->dn; - model = of_get_property(np, "model", NULL); - if (model == NULL || !of_node_name_eq(np, "pci")) - return 0; - - /* Setup workarounds for spider */ - if (strcmp(model, "Spider")) - return 0; - - iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init, - (void *)SPIDER_PCI_REG_BASE); - return 0; -} - -static const struct of_device_id cell_bus_ids[] __initconst = { - { .type = "soc", }, - { .compatible = "soc", }, - { .type = "spider", }, - { .type = "axon", }, - { .type = "plb5", }, - { .type = "plb4", }, - { .type = "opb", }, - { .type = "ebc", }, - {}, -}; - -static int __init cell_publish_devices(void) -{ - struct device_node *root = of_find_node_by_path("/"); - struct device_node *np; - int node; - - /* Publish OF platform devices for southbridge IOs */ - of_platform_bus_probe(NULL, cell_bus_ids, NULL); - - /* On spider based blades, we need to manually create the OF - * platform devices for the PCI host bridges - */ - for_each_child_of_node(root, np) { - if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "pciex")) - continue; - of_platform_device_create(np, NULL, NULL); - } - - /* There is no device for the MIC memory controller, thus we create - * a platform device for it to attach the EDAC driver to. 
- */ - for_each_online_node(node) { - if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL) - continue; - platform_device_register_simple("cbe-mic", node, NULL, 0); - } - - return 0; -} -machine_subsys_initcall(cell, cell_publish_devices); - -static void __init mpic_init_IRQ(void) -{ - struct device_node *dn; - struct mpic *mpic; - - for_each_node_by_name(dn, "interrupt-controller") { - if (!of_device_is_compatible(dn, "CBEA,platform-open-pic")) - continue; - - /* The MPIC driver will get everything it needs from the - * device-tree, just pass 0 to all arguments - */ - mpic = mpic_alloc(dn, 0, MPIC_SECONDARY | MPIC_NO_RESET, - 0, 0, " MPIC "); - if (mpic == NULL) - continue; - mpic_init(mpic); - } -} - - -static void __init cell_init_irq(void) -{ - iic_init_IRQ(); - spider_init_IRQ(); - mpic_init_IRQ(); -} - -static void __init cell_set_dabrx(void) -{ - mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); -} - -static void __init cell_setup_arch(void) -{ -#ifdef CONFIG_SPU_BASE - spu_priv1_ops = &spu_priv1_mmio_ops; - spu_management_ops = &spu_management_of_ops; -#endif - - cbe_regs_init(); - - cell_set_dabrx(); - -#ifdef CONFIG_CBE_RAS - cbe_ras_init(); -#endif - -#ifdef CONFIG_SMP - smp_init_cell(); -#endif - /* init to some ~sane value until calibrate_delay() runs */ - loops_per_jiffy = 50000000; - - /* Find and initialize PCI host bridges */ - init_pci_config_tokens(); - - cbe_pervasive_init(); - - mmio_nvram_init(); -} - -static int __init cell_probe(void) -{ - if (!of_machine_is_compatible("IBM,CBEA") && - !of_machine_is_compatible("IBM,CPBW-1.0")) - return 0; - - pm_power_off = rtas_power_off; - - return 1; -} - -define_machine(cell) { - .name = "Cell", - .probe = cell_probe, - .setup_arch = cell_setup_arch, - .show_cpuinfo = cell_show_cpuinfo, - .restart = rtas_restart, - .halt = rtas_halt, - .get_boot_time = rtas_get_boot_time, - .get_rtc_time = rtas_get_rtc_time, - .set_rtc_time = rtas_set_rtc_time, - .calibrate_decr = generic_calibrate_decr, - .progress = cell_progress, - .init_IRQ = cell_init_irq, - .pci_setup_phb = cell_setup_phb, -}; - -struct pci_controller_ops cell_pci_controller_ops; diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c deleted file mode 100644 index 31ce00b52a32..000000000000 --- a/arch/powerpc/platforms/cell/smp.c +++ /dev/null @@ -1,161 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * SMP support for BPA machines. - * - * Dave Engebretsen, Peter Bergner, and - * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com - * - * Plus various changes from other IBM teams... - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/cache.h> -#include <linux/err.h> -#include <linux/device.h> -#include <linux/cpu.h> -#include <linux/pgtable.h> - -#include <asm/ptrace.h> -#include <linux/atomic.h> -#include <asm/irq.h> -#include <asm/page.h> -#include <asm/io.h> -#include <asm/smp.h> -#include <asm/paca.h> -#include <asm/machdep.h> -#include <asm/cputable.h> -#include <asm/firmware.h> -#include <asm/rtas.h> -#include <asm/cputhreads.h> -#include <asm/code-patching.h> - -#include "interrupt.h" -#include <asm/udbg.h> - -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) 
-#endif - -/* - * The Primary thread of each non-boot processor was started from the OF client - * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop. - */ -static cpumask_t of_spin_map; - -/** - * smp_startup_cpu() - start the given cpu - * - * At boot time, there is nothing to do for primary threads which were - * started from Open Firmware. For anything else, call RTAS with the - * appropriate start location. - * - * Returns: - * 0 - failure - * 1 - success - */ -static inline int smp_startup_cpu(unsigned int lcpu) -{ - int status; - unsigned long start_here = - __pa(ppc_function_entry(generic_secondary_smp_init)); - unsigned int pcpu; - int start_cpu; - - if (cpumask_test_cpu(lcpu, &of_spin_map)) - /* Already started by OF and sitting in spin loop */ - return 1; - - pcpu = get_hard_smp_processor_id(lcpu); - - /* - * If the RTAS start-cpu token does not exist then presume the - * cpu is already spinning. - */ - start_cpu = rtas_token("start-cpu"); - if (start_cpu == RTAS_UNKNOWN_SERVICE) - return 1; - - status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu); - if (status != 0) { - printk(KERN_ERR "start-cpu failed: %i\n", status); - return 0; - } - - return 1; -} - -static void smp_cell_setup_cpu(int cpu) -{ - if (cpu != boot_cpuid) - iic_setup_cpu(); - - /* - * change default DABRX to allow user watchpoints - */ - mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); -} - -static int smp_cell_kick_cpu(int nr) -{ - if (nr < 0 || nr >= nr_cpu_ids) - return -EINVAL; - - if (!smp_startup_cpu(nr)) - return -ENOENT; - - /* - * The processor is currently spinning, waiting for the - * cpu_start field to become non-zero After we set cpu_start, - * the processor will continue on to secondary_start - */ - paca_ptrs[nr]->cpu_start = 1; - - return 0; -} - -static struct smp_ops_t bpa_iic_smp_ops = { - .message_pass = iic_message_pass, - .probe = iic_request_IPIs, - .kick_cpu = smp_cell_kick_cpu, - .setup_cpu = smp_cell_setup_cpu, - .cpu_bootable = smp_generic_cpu_bootable, -}; - -/* This is called very early */ -void __init smp_init_cell(void) -{ - int i; - - DBG(" -> smp_init_cell()\n"); - - smp_ops = &bpa_iic_smp_ops; - - /* Mark threads which are still spinning in hold loops. 
*/ - if (cpu_has_feature(CPU_FTR_SMT)) { - for_each_present_cpu(i) { - if (cpu_thread_in_core(i) == 0) - cpumask_set_cpu(i, &of_spin_map); - } - } else - cpumask_copy(&of_spin_map, cpu_present_mask); - - cpumask_clear_cpu(boot_cpuid, &of_spin_map); - - /* Non-lpar has additional take/give timebase */ - if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { - smp_ops->give_timebase = rtas_give_timebase; - smp_ops->take_timebase = rtas_take_timebase; - } - - DBG(" <- smp_init_cell()\n"); -} diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c deleted file mode 100644 index e36ebd84f55b..000000000000 --- a/arch/powerpc/platforms/cell/spider-pci.c +++ /dev/null @@ -1,171 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * IO workarounds for PCI on Celleb/Cell platform - * - * (C) Copyright 2006-2007 TOSHIBA CORPORATION - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/of_address.h> -#include <linux/of_platform.h> -#include <linux/slab.h> -#include <linux/io.h> - -#include <asm/ppc-pci.h> -#include <asm/pci-bridge.h> -#include <asm/io-workarounds.h> - -#define SPIDER_PCI_DISABLE_PREFETCH - -struct spiderpci_iowa_private { - void __iomem *regs; -}; - -static void spiderpci_io_flush(struct iowa_bus *bus) -{ - struct spiderpci_iowa_private *priv; - - priv = bus->private; - in_be32(priv->regs + SPIDER_PCI_DUMMY_READ); - iosync(); -} - -#define SPIDER_PCI_MMIO_READ(name, ret) \ -static ret spiderpci_##name(const PCI_IO_ADDR addr) \ -{ \ - ret val = __do_##name(addr); \ - spiderpci_io_flush(iowa_mem_find_bus(addr)); \ - return val; \ -} - -#define SPIDER_PCI_MMIO_READ_STR(name) \ -static void spiderpci_##name(const PCI_IO_ADDR addr, void *buf, \ - unsigned long count) \ -{ \ - __do_##name(addr, buf, count); \ - spiderpci_io_flush(iowa_mem_find_bus(addr)); \ -} - -SPIDER_PCI_MMIO_READ(readb, u8) -SPIDER_PCI_MMIO_READ(readw, u16) -SPIDER_PCI_MMIO_READ(readl, u32) -SPIDER_PCI_MMIO_READ(readq, u64) -SPIDER_PCI_MMIO_READ(readw_be, u16) -SPIDER_PCI_MMIO_READ(readl_be, u32) -SPIDER_PCI_MMIO_READ(readq_be, u64) -SPIDER_PCI_MMIO_READ_STR(readsb) -SPIDER_PCI_MMIO_READ_STR(readsw) -SPIDER_PCI_MMIO_READ_STR(readsl) - -static void spiderpci_memcpy_fromio(void *dest, const PCI_IO_ADDR src, - unsigned long n) -{ - __do_memcpy_fromio(dest, src, n); - spiderpci_io_flush(iowa_mem_find_bus(src)); -} - -static int __init spiderpci_pci_setup_chip(struct pci_controller *phb, - void __iomem *regs) -{ - void *dummy_page_va; - dma_addr_t dummy_page_da; - -#ifdef SPIDER_PCI_DISABLE_PREFETCH - u32 val = in_be32(regs + SPIDER_PCI_VCI_CNTL_STAT); - pr_debug("SPIDER_IOWA:PVCI_Control_Status was 0x%08x\n", val); - out_be32(regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8); -#endif /* SPIDER_PCI_DISABLE_PREFETCH */ - - /* setup dummy read */ - /* - * On CellBlade, we can't know that which XDR memory is used by - * kmalloc() to allocate dummy_page_va. - * In order to improve the performance, the XDR which is used to - * allocate dummy_page_va is the nearest the spider-pci. - * We have to select the CBE which is the nearest the spider-pci - * to allocate memory from the best XDR, but I don't know that - * how to do. - * - * Celleb does not have this problem, because it has only one XDR. 
- */ - dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!dummy_page_va) { - pr_err("SPIDERPCI-IOWA:Alloc dummy_page_va failed.\n"); - return -1; - } - - dummy_page_da = dma_map_single(phb->parent, dummy_page_va, - PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(phb->parent, dummy_page_da)) { - pr_err("SPIDER-IOWA:Map dummy page failed.\n"); - kfree(dummy_page_va); - return -1; - } - - out_be32(regs + SPIDER_PCI_DUMMY_READ_BASE, dummy_page_da); - - return 0; -} - -int __init spiderpci_iowa_init(struct iowa_bus *bus, void *data) -{ - void __iomem *regs = NULL; - struct spiderpci_iowa_private *priv; - struct device_node *np = bus->phb->dn; - struct resource r; - unsigned long offset = (unsigned long)data; - - pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%pOF)\n", - np); - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - pr_err("SPIDERPCI-IOWA:" - "Can't allocate struct spiderpci_iowa_private"); - return -1; - } - - if (of_address_to_resource(np, 0, &r)) { - pr_err("SPIDERPCI-IOWA:Can't get resource.\n"); - goto error; - } - - regs = ioremap(r.start + offset, SPIDER_PCI_REG_SIZE); - if (!regs) { - pr_err("SPIDERPCI-IOWA:ioremap failed.\n"); - goto error; - } - priv->regs = regs; - bus->private = priv; - - if (spiderpci_pci_setup_chip(bus->phb, regs)) - goto error; - - return 0; - -error: - kfree(priv); - bus->private = NULL; - - if (regs) - iounmap(regs); - - return -1; -} - -struct ppc_pci_io spiderpci_ops = { - .readb = spiderpci_readb, - .readw = spiderpci_readw, - .readl = spiderpci_readl, - .readq = spiderpci_readq, - .readw_be = spiderpci_readw_be, - .readl_be = spiderpci_readl_be, - .readq_be = spiderpci_readq_be, - .readsb = spiderpci_readsb, - .readsw = spiderpci_readsw, - .readsl = spiderpci_readsl, - .memcpy_fromio = spiderpci_memcpy_fromio, -}; - diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c deleted file mode 100644 index 11df737c8c6a..000000000000 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ /dev/null @@ -1,344 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * External Interrupt Controller on Spider South Bridge - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * Author: Arnd Bergmann <arndb@de.ibm.com> - */ - -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/ioport.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/pgtable.h> - -#include <asm/io.h> - -#include "interrupt.h" - -/* register layout taken from Spider spec, table 7.4-4 */ -enum { - TIR_DEN = 0x004, /* Detection Enable Register */ - TIR_MSK = 0x084, /* Mask Level Register */ - TIR_EDC = 0x0c0, /* Edge Detection Clear Register */ - TIR_PNDA = 0x100, /* Pending Register A */ - TIR_PNDB = 0x104, /* Pending Register B */ - TIR_CS = 0x144, /* Current Status Register */ - TIR_LCSA = 0x150, /* Level Current Status Register A */ - TIR_LCSB = 0x154, /* Level Current Status Register B */ - TIR_LCSC = 0x158, /* Level Current Status Register C */ - TIR_LCSD = 0x15c, /* Level Current Status Register D */ - TIR_CFGA = 0x200, /* Setting Register A0 */ - TIR_CFGB = 0x204, /* Setting Register B0 */ - /* 0x208 ... 
0x3ff Setting Register An/Bn */ - TIR_PPNDA = 0x400, /* Packet Pending Register A */ - TIR_PPNDB = 0x404, /* Packet Pending Register B */ - TIR_PIERA = 0x408, /* Packet Output Error Register A */ - TIR_PIERB = 0x40c, /* Packet Output Error Register B */ - TIR_PIEN = 0x444, /* Packet Output Enable Register */ - TIR_PIPND = 0x454, /* Packet Output Pending Register */ - TIRDID = 0x484, /* Spider Device ID Register */ - REISTIM = 0x500, /* Reissue Command Timeout Time Setting */ - REISTIMEN = 0x504, /* Reissue Command Timeout Setting */ - REISWAITEN = 0x508, /* Reissue Wait Control*/ -}; - -#define SPIDER_CHIP_COUNT 4 -#define SPIDER_SRC_COUNT 64 -#define SPIDER_IRQ_INVALID 63 - -struct spider_pic { - struct irq_domain *host; - void __iomem *regs; - unsigned int node_id; -}; -static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; - -static struct spider_pic *spider_irq_data_to_pic(struct irq_data *d) -{ - return irq_data_get_irq_chip_data(d); -} - -static void __iomem *spider_get_irq_config(struct spider_pic *pic, - unsigned int src) -{ - return pic->regs + TIR_CFGA + 8 * src; -} - -static void spider_unmask_irq(struct irq_data *d) -{ - struct spider_pic *pic = spider_irq_data_to_pic(d); - void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); - - out_be32(cfg, in_be32(cfg) | 0x30000000u); -} - -static void spider_mask_irq(struct irq_data *d) -{ - struct spider_pic *pic = spider_irq_data_to_pic(d); - void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); - - out_be32(cfg, in_be32(cfg) & ~0x30000000u); -} - -static void spider_ack_irq(struct irq_data *d) -{ - struct spider_pic *pic = spider_irq_data_to_pic(d); - unsigned int src = irqd_to_hwirq(d); - - /* Reset edge detection logic if necessary - */ - if (irqd_is_level_type(d)) - return; - - /* Only interrupts 47 to 50 can be set to edge */ - if (src < 47 || src > 50) - return; - - /* Perform the clear of the edge logic */ - out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); -} - -static int spider_set_irq_type(struct irq_data *d, unsigned int type) -{ - unsigned int sense = type & IRQ_TYPE_SENSE_MASK; - struct spider_pic *pic = spider_irq_data_to_pic(d); - unsigned int hw = irqd_to_hwirq(d); - void __iomem *cfg = spider_get_irq_config(pic, hw); - u32 old_mask; - u32 ic; - - /* Note that only level high is supported for most interrupts */ - if (sense != IRQ_TYPE_NONE && sense != IRQ_TYPE_LEVEL_HIGH && - (hw < 47 || hw > 50)) - return -EINVAL; - - /* Decode sense type */ - switch(sense) { - case IRQ_TYPE_EDGE_RISING: - ic = 0x3; - break; - case IRQ_TYPE_EDGE_FALLING: - ic = 0x2; - break; - case IRQ_TYPE_LEVEL_LOW: - ic = 0x0; - break; - case IRQ_TYPE_LEVEL_HIGH: - case IRQ_TYPE_NONE: - ic = 0x1; - break; - default: - return -EINVAL; - } - - /* Configure the source. One gross hack that was there before and - * that I've kept around is the priority to the BE which I set to - * be the same as the interrupt source number. I don't know whether - * that's supposed to make any kind of sense however, we'll have to - * decide that, but for now, I'm not changing the behaviour. 
- */ - old_mask = in_be32(cfg) & 0x30000000u; - out_be32(cfg, old_mask | (ic << 24) | (0x7 << 16) | - (pic->node_id << 4) | 0xe); - out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff)); - - return 0; -} - -static struct irq_chip spider_pic = { - .name = "SPIDER", - .irq_unmask = spider_unmask_irq, - .irq_mask = spider_mask_irq, - .irq_ack = spider_ack_irq, - .irq_set_type = spider_set_irq_type, -}; - -static int spider_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - irq_set_chip_data(virq, h->host_data); - irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq); - - /* Set default irq type */ - irq_set_irq_type(virq, IRQ_TYPE_NONE); - - return 0; -} - -static int spider_host_xlate(struct irq_domain *h, struct device_node *ct, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_flags) - -{ - /* Spider interrupts have 2 cells, first is the interrupt source, - * second, well, I don't know for sure yet ... We mask the top bits - * because old device-trees encode a node number in there - */ - *out_hwirq = intspec[0] & 0x3f; - *out_flags = IRQ_TYPE_LEVEL_HIGH; - return 0; -} - -static const struct irq_domain_ops spider_host_ops = { - .map = spider_host_map, - .xlate = spider_host_xlate, -}; - -static void spider_irq_cascade(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct spider_pic *pic = irq_desc_get_handler_data(desc); - unsigned int cs; - - cs = in_be32(pic->regs + TIR_CS) >> 24; - if (cs != SPIDER_IRQ_INVALID) - generic_handle_domain_irq(pic->host, cs); - - chip->irq_eoi(&desc->irq_data); -} - -/* For hooking up the cascade we have a problem. Our device-tree is - * crap and we don't know on which BE iic interrupt we are hooked on, at - * least not the "standard" way. We can reconstitute it based on two - * pieces of information, though: which BE node we are connected to and whether - * we are connected to IOIF0 or IOIF1. Right now, we really only care - * about the IBM cell blade and we know that its firmware gives us an - * interrupt-map property which is pretty strange. - */ -static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) -{ - unsigned int virq; - const u32 *imap, *tmp; - int imaplen, intsize, unit; - struct device_node *iic; - struct device_node *of_node; - - of_node = irq_domain_get_of_node(pic->host); - - /* First, we check whether we have a real "interrupts" in the device - * tree in case the device-tree is ever fixed - */ - virq = irq_of_parse_and_map(of_node, 0); - if (virq) - return virq; - - /* Now do the horrible hacks */ - tmp = of_get_property(of_node, "#interrupt-cells", NULL); - if (tmp == NULL) - return 0; - intsize = *tmp; - imap = of_get_property(of_node, "interrupt-map", &imaplen); - if (imap == NULL || imaplen < (intsize + 1)) - return 0; - iic = of_find_node_by_phandle(imap[intsize]); - if (iic == NULL) - return 0; - imap += intsize + 1; - tmp = of_get_property(iic, "#interrupt-cells", NULL); - if (tmp == NULL) { - of_node_put(iic); - return 0; - } - intsize = *tmp; - /* Assume unit is last entry of interrupt specifier */ - unit = imap[intsize - 1]; - /* Ok, we have a unit, now let's try to get the node */ - tmp = of_get_property(iic, "ibm,interrupt-server-ranges", NULL); - if (tmp == NULL) { - of_node_put(iic); - return 0; - } - /* ugly as hell but works for now */ - pic->node_id = (*tmp) >> 1; - of_node_put(iic); - - /* Ok, now let's get cracking. 
You may ask me why I just didn't match - * the iic host from the iic OF node, but that way I'm still compatible - * with really really old old firmwares for which we don't have a node - */ - /* Manufacture an IIC interrupt number of class 2 */ - virq = irq_create_mapping(NULL, - (pic->node_id << IIC_IRQ_NODE_SHIFT) | - (2 << IIC_IRQ_CLASS_SHIFT) | - unit); - if (!virq) - printk(KERN_ERR "spider_pic: failed to map cascade !"); - return virq; -} - - -static void __init spider_init_one(struct device_node *of_node, int chip, - unsigned long addr) -{ - struct spider_pic *pic = &spider_pics[chip]; - int i, virq; - - /* Map registers */ - pic->regs = ioremap(addr, 0x1000); - if (pic->regs == NULL) - panic("spider_pic: can't map registers !"); - - /* Allocate a host */ - pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT, - &spider_host_ops, pic); - if (pic->host == NULL) - panic("spider_pic: can't allocate irq host !"); - - /* Go through all sources and disable them */ - for (i = 0; i < SPIDER_SRC_COUNT; i++) { - void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i; - out_be32(cfg, in_be32(cfg) & ~0x30000000u); - } - - /* do not mask any interrupts because of level */ - out_be32(pic->regs + TIR_MSK, 0x0); - - /* enable interrupt packets to be output */ - out_be32(pic->regs + TIR_PIEN, in_be32(pic->regs + TIR_PIEN) | 0x1); - - /* Hook up the cascade interrupt to the iic and nodeid */ - virq = spider_find_cascade_and_node(pic); - if (!virq) - return; - irq_set_handler_data(virq, pic); - irq_set_chained_handler(virq, spider_irq_cascade); - - printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %pOF\n", - pic->node_id, addr, of_node); - - /* Enable the interrupt detection enable bit. Do this last! */ - out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1); -} - -void __init spider_init_IRQ(void) -{ - struct resource r; - struct device_node *dn; - int chip = 0; - - /* XXX node numbers are totally bogus. We _hope_ we get the device - * nodes in the right order here but that's definitely not guaranteed, - * we need to get the node from the device tree instead. 
- * There is currently no proper property for it (but our whole - * device-tree is bogus anyway) so all we can do is pray or maybe test - * the address and deduce the node-id - */ - for_each_node_by_name(dn, "interrupt-controller") { - if (of_device_is_compatible(dn, "CBEA,platform-spider-pic")) { - if (of_address_to_resource(dn, 0, &r)) { - printk(KERN_WARNING "spider-pic: Failed\n"); - continue; - } - } else if (of_device_is_compatible(dn, "sti,platform-spider-pic") - && (chip < 2)) { - static long hard_coded_pics[] = - { 0x24000008000ul, 0x34000008000ul}; - r.start = hard_coded_pics[chip]; - } else - continue; - spider_init_one(dn, chip++, r.start); - } -} diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 7bd0b563e163..2c07387201d0 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -23,7 +23,6 @@ #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/spu_csa.h> -#include <asm/xmon.h> #include <asm/kexec.h> const struct spu_management_ops *spu_management_ops; @@ -326,12 +325,6 @@ spu_irq_class_1(int irq, void *data) if (stat & CLASS1_STORAGE_FAULT_INTR) __spu_trap_data_map(spu, dar, dsisr); - if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR) - ; - - if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR) - ; - spu->class_1_dsisr = 0; spu->class_1_dar = 0; @@ -778,7 +771,6 @@ static int __init init_spu_base(void) fb_append_extra_logo(&logo_spe_clut224, ret); mutex_lock(&spu_full_list_mutex); - xmon_register_spus(&spu_full_list); crash_register_spus(&spu_full_list); mutex_unlock(&spu_full_list_mutex); spu_add_dev_attr(&dev_attr_stat); diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c index fe0d8797a00a..e780c14c5733 100644 --- a/arch/powerpc/platforms/cell/spu_callbacks.c +++ b/arch/powerpc/platforms/cell/spu_callbacks.c @@ -34,15 +34,15 @@ * mbind, mq_open, ipc, ... */ -static void *spu_syscall_table[] = { +static const syscall_fn spu_syscall_table[] = { #define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry) -#define __SYSCALL(nr, entry) [nr] = entry, +#define __SYSCALL(nr, entry) [nr] = (void *) entry, #include <asm/syscall_table_spu.h> }; long spu_sys_callback(struct spu_syscall_block *s) { - long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6); + syscall_fn syscall; if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) { pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret); diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c deleted file mode 100644 index ae09c5a91b40..000000000000 --- a/arch/powerpc/platforms/cell/spu_manage.c +++ /dev/null @@ -1,527 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * spu management operations for of based platforms - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * Copyright 2006 Sony Corp. 
- * (C) Copyright 2007 TOSHIBA CORPORATION - */ - -#include <linux/interrupt.h> -#include <linux/list.h> -#include <linux/export.h> -#include <linux/ptrace.h> -#include <linux/wait.h> -#include <linux/mm.h> -#include <linux/io.h> -#include <linux/mutex.h> -#include <linux/device.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> - -#include <asm/spu.h> -#include <asm/spu_priv1.h> -#include <asm/firmware.h> - -#include "spufs/spufs.h" -#include "interrupt.h" - -struct device_node *spu_devnode(struct spu *spu) -{ - return spu->devnode; -} - -EXPORT_SYMBOL_GPL(spu_devnode); - -static u64 __init find_spu_unit_number(struct device_node *spe) -{ - const unsigned int *prop; - int proplen; - - /* new device trees should provide the physical-id attribute */ - prop = of_get_property(spe, "physical-id", &proplen); - if (proplen == 4) - return (u64)*prop; - - /* celleb device tree provides the unit-id */ - prop = of_get_property(spe, "unit-id", &proplen); - if (proplen == 4) - return (u64)*prop; - - /* legacy device trees provide the id in the reg attribute */ - prop = of_get_property(spe, "reg", &proplen); - if (proplen == 4) - return (u64)*prop; - - return 0; -} - -static void spu_unmap(struct spu *spu) -{ - if (!firmware_has_feature(FW_FEATURE_LPAR)) - iounmap(spu->priv1); - iounmap(spu->priv2); - iounmap(spu->problem); - iounmap((__force u8 __iomem *)spu->local_store); -} - -static int __init spu_map_interrupts_old(struct spu *spu, - struct device_node *np) -{ - unsigned int isrc; - const u32 *tmp; - int nid; - - /* Get the interrupt source unit from the device-tree */ - tmp = of_get_property(np, "isrc", NULL); - if (!tmp) - return -ENODEV; - isrc = tmp[0]; - - tmp = of_get_property(np->parent->parent, "node-id", NULL); - if (!tmp) { - printk(KERN_WARNING "%s: can't find node-id\n", __func__); - nid = spu->node; - } else - nid = tmp[0]; - - /* Add the node number */ - isrc |= nid << IIC_IRQ_NODE_SHIFT; - - /* Now map interrupts of all 3 classes */ - spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc); - spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc); - spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc); - - /* Right now, we only fail if class 2 failed */ - if (!spu->irqs[2]) - return -EINVAL; - - return 0; -} - -static void __iomem * __init spu_map_prop_old(struct spu *spu, - struct device_node *n, - const char *name) -{ - const struct address_prop { - unsigned long address; - unsigned int len; - } __attribute__((packed)) *prop; - int proplen; - - prop = of_get_property(n, name, &proplen); - if (prop == NULL || proplen != sizeof (struct address_prop)) - return NULL; - - return ioremap(prop->address, prop->len); -} - -static int __init spu_map_device_old(struct spu *spu) -{ - struct device_node *node = spu->devnode; - const char *prop; - int ret; - - ret = -ENODEV; - spu->name = of_get_property(node, "name", NULL); - if (!spu->name) - goto out; - - prop = of_get_property(node, "local-store", NULL); - if (!prop) - goto out; - spu->local_store_phys = *(unsigned long *)prop; - - /* we use local store as ram, not io memory */ - spu->local_store = (void __force *) - spu_map_prop_old(spu, node, "local-store"); - if (!spu->local_store) - goto out; - - prop = of_get_property(node, "problem", NULL); - if (!prop) - goto out_unmap; - spu->problem_phys = *(unsigned long *)prop; - - spu->problem = spu_map_prop_old(spu, node, "problem"); - if (!spu->problem) - goto out_unmap; - - spu->priv2 = spu_map_prop_old(spu, node, "priv2"); - if (!spu->priv2) - goto 
out_unmap; - - if (!firmware_has_feature(FW_FEATURE_LPAR)) { - spu->priv1 = spu_map_prop_old(spu, node, "priv1"); - if (!spu->priv1) - goto out_unmap; - } - - ret = 0; - goto out; - -out_unmap: - spu_unmap(spu); -out: - return ret; -} - -static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) -{ - int i; - - for (i=0; i < 3; i++) { - spu->irqs[i] = irq_of_parse_and_map(np, i); - if (!spu->irqs[i]) - goto err; - } - return 0; - -err: - pr_debug("failed to map irq %x for spu %s\n", i, spu->name); - for (; i >= 0; i--) { - if (spu->irqs[i]) - irq_dispose_mapping(spu->irqs[i]); - } - return -EINVAL; -} - -static int __init spu_map_resource(struct spu *spu, int nr, - void __iomem** virt, unsigned long *phys) -{ - struct device_node *np = spu->devnode; - struct resource resource = { }; - unsigned long len; - int ret; - - ret = of_address_to_resource(np, nr, &resource); - if (ret) - return ret; - if (phys) - *phys = resource.start; - len = resource_size(&resource); - *virt = ioremap(resource.start, len); - if (!*virt) - return -EINVAL; - return 0; -} - -static int __init spu_map_device(struct spu *spu) -{ - struct device_node *np = spu->devnode; - int ret = -ENODEV; - - spu->name = of_get_property(np, "name", NULL); - if (!spu->name) - goto out; - - ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store, - &spu->local_store_phys); - if (ret) { - pr_debug("spu_new: failed to map %pOF resource 0\n", - np); - goto out; - } - ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem, - &spu->problem_phys); - if (ret) { - pr_debug("spu_new: failed to map %pOF resource 1\n", - np); - goto out_unmap; - } - ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL); - if (ret) { - pr_debug("spu_new: failed to map %pOF resource 2\n", - np); - goto out_unmap; - } - if (!firmware_has_feature(FW_FEATURE_LPAR)) - ret = spu_map_resource(spu, 3, - (void __iomem**)&spu->priv1, NULL); - if (ret) { - pr_debug("spu_new: failed to map %pOF resource 3\n", - np); - goto out_unmap; - } - pr_debug("spu_new: %pOF maps:\n", np); - pr_debug(" local store : 0x%016lx -> 0x%p\n", - spu->local_store_phys, spu->local_store); - pr_debug(" problem state : 0x%016lx -> 0x%p\n", - spu->problem_phys, spu->problem); - pr_debug(" priv2 : 0x%p\n", spu->priv2); - pr_debug(" priv1 : 0x%p\n", spu->priv1); - - return 0; - -out_unmap: - spu_unmap(spu); -out: - pr_debug("failed to map spe %s: %d\n", spu->name, ret); - return ret; -} - -static int __init of_enumerate_spus(int (*fn)(void *data)) -{ - int ret; - struct device_node *node; - unsigned int n = 0; - - ret = -ENODEV; - for_each_node_by_type(node, "spe") { - ret = fn(node); - if (ret) { - printk(KERN_WARNING "%s: Error initializing %pOFn\n", - __func__, node); - of_node_put(node); - break; - } - n++; - } - return ret ? 
ret : n; -} - -static int __init of_create_spu(struct spu *spu, void *data) -{ - int ret; - struct device_node *spe = (struct device_node *)data; - static int legacy_map = 0, legacy_irq = 0; - - spu->devnode = of_node_get(spe); - spu->spe_id = find_spu_unit_number(spe); - - spu->node = of_node_to_nid(spe); - if (spu->node >= MAX_NUMNODES) { - printk(KERN_WARNING "SPE %pOF on node %d ignored," - " node number too big\n", spe, spu->node); - printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n"); - ret = -ENODEV; - goto out; - } - - ret = spu_map_device(spu); - if (ret) { - if (!legacy_map) { - legacy_map = 1; - printk(KERN_WARNING "%s: Legacy device tree found, " - "trying to map old style\n", __func__); - } - ret = spu_map_device_old(spu); - if (ret) { - printk(KERN_ERR "Unable to map %s\n", - spu->name); - goto out; - } - } - - ret = spu_map_interrupts(spu, spe); - if (ret) { - if (!legacy_irq) { - legacy_irq = 1; - printk(KERN_WARNING "%s: Legacy device tree found, " - "trying old style irq\n", __func__); - } - ret = spu_map_interrupts_old(spu, spe); - if (ret) { - printk(KERN_ERR "%s: could not map interrupts\n", - spu->name); - goto out_unmap; - } - } - - pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name, - spu->local_store, spu->problem, spu->priv1, - spu->priv2, spu->number); - goto out; - -out_unmap: - spu_unmap(spu); -out: - return ret; -} - -static int of_destroy_spu(struct spu *spu) -{ - spu_unmap(spu); - of_node_put(spu->devnode); - return 0; -} - -static void enable_spu_by_master_run(struct spu_context *ctx) -{ - ctx->ops->master_start(ctx); -} - -static void disable_spu_by_master_run(struct spu_context *ctx) -{ - ctx->ops->master_stop(ctx); -} - -/* Hardcoded affinity idxs for qs20 */ -#define QS20_SPES_PER_BE 8 -static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 }; -static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 }; - -static struct spu *__init spu_lookup_reg(int node, u32 reg) -{ - struct spu *spu; - const u32 *spu_reg; - - list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { - spu_reg = of_get_property(spu_devnode(spu), "reg", NULL); - if (*spu_reg == reg) - return spu; - } - return NULL; -} - -static void __init init_affinity_qs20_harcoded(void) -{ - int node, i; - struct spu *last_spu, *spu; - u32 reg; - - for (node = 0; node < MAX_NUMNODES; node++) { - last_spu = NULL; - for (i = 0; i < QS20_SPES_PER_BE; i++) { - reg = qs20_reg_idxs[i]; - spu = spu_lookup_reg(node, reg); - if (!spu) - continue; - spu->has_mem_affinity = qs20_reg_memory[reg]; - if (last_spu) - list_add_tail(&spu->aff_list, - &last_spu->aff_list); - last_spu = spu; - } - } -} - -static int __init of_has_vicinity(void) -{ - struct device_node *dn; - - for_each_node_by_type(dn, "spe") { - if (of_find_property(dn, "vicinity", NULL)) { - of_node_put(dn); - return 1; - } - } - return 0; -} - -static struct spu *__init devnode_spu(int cbe, struct device_node *dn) -{ - struct spu *spu; - - list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) - if (spu_devnode(spu) == dn) - return spu; - return NULL; -} - -static struct spu * __init -neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid) -{ - struct spu *spu; - struct device_node *spu_dn; - const phandle *vic_handles; - int lenp, i; - - list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) { - spu_dn = spu_devnode(spu); - if (spu_dn == avoid) - continue; - vic_handles = of_get_property(spu_dn, "vicinity", &lenp); - for (i=0; i < (lenp / sizeof(phandle)); i++) { - if 
(vic_handles[i] == target->phandle) - return spu; - } - } - return NULL; -} - -static void __init init_affinity_node(int cbe) -{ - struct spu *spu, *last_spu; - struct device_node *vic_dn, *last_spu_dn; - phandle avoid_ph; - const phandle *vic_handles; - int lenp, i, added; - - last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu, - cbe_list); - avoid_ph = 0; - for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) { - last_spu_dn = spu_devnode(last_spu); - vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp); - - /* - * Walk through each phandle in vicinity property of the spu - * (typically two vicinity phandles per spe node) - */ - for (i = 0; i < (lenp / sizeof(phandle)); i++) { - if (vic_handles[i] == avoid_ph) - continue; - - vic_dn = of_find_node_by_phandle(vic_handles[i]); - if (!vic_dn) - continue; - - if (of_node_name_eq(vic_dn, "spe") ) { - spu = devnode_spu(cbe, vic_dn); - avoid_ph = last_spu_dn->phandle; - } else { - /* - * "mic-tm" and "bif0" nodes do not have - * vicinity property. So we need to find the - * spe which has vic_dn as neighbour, but - * skipping the one we came from (last_spu_dn) - */ - spu = neighbour_spu(cbe, vic_dn, last_spu_dn); - if (!spu) - continue; - if (of_node_name_eq(vic_dn, "mic-tm")) { - last_spu->has_mem_affinity = 1; - spu->has_mem_affinity = 1; - } - avoid_ph = vic_dn->phandle; - } - - list_add_tail(&spu->aff_list, &last_spu->aff_list); - last_spu = spu; - break; - } - } -} - -static void __init init_affinity_fw(void) -{ - int cbe; - - for (cbe = 0; cbe < MAX_NUMNODES; cbe++) - init_affinity_node(cbe); -} - -static int __init init_affinity(void) -{ - if (of_has_vicinity()) { - init_affinity_fw(); - } else { - if (of_machine_is_compatible("IBM,CPBW-1.0")) - init_affinity_qs20_harcoded(); - else - printk("No affinity configuration found\n"); - } - - return 0; -} - -const struct spu_management_ops spu_management_of_ops = { - .enumerate_spus = of_enumerate_spus, - .create_spu = of_create_spu, - .destroy_spu = of_destroy_spu, - .enable_spu = enable_spu_by_master_run, - .disable_spu = disable_spu_by_master_run, - .init_affinity = init_affinity, -}; diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c deleted file mode 100644 index d150e3987304..000000000000 --- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c +++ /dev/null @@ -1,167 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * spu hypervisor abstraction for direct hardware access. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * Copyright 2006 Sony Corp. 
- */ - -#include <linux/interrupt.h> -#include <linux/list.h> -#include <linux/ptrace.h> -#include <linux/wait.h> -#include <linux/mm.h> -#include <linux/io.h> -#include <linux/mutex.h> -#include <linux/device.h> -#include <linux/sched.h> - -#include <asm/spu.h> -#include <asm/spu_priv1.h> -#include <asm/firmware.h> - -#include "interrupt.h" -#include "spu_priv1_mmio.h" - -static void int_mask_and(struct spu *spu, int class, u64 mask) -{ - u64 old_mask; - - old_mask = in_be64(&spu->priv1->int_mask_RW[class]); - out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask); -} - -static void int_mask_or(struct spu *spu, int class, u64 mask) -{ - u64 old_mask; - - old_mask = in_be64(&spu->priv1->int_mask_RW[class]); - out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask); -} - -static void int_mask_set(struct spu *spu, int class, u64 mask) -{ - out_be64(&spu->priv1->int_mask_RW[class], mask); -} - -static u64 int_mask_get(struct spu *spu, int class) -{ - return in_be64(&spu->priv1->int_mask_RW[class]); -} - -static void int_stat_clear(struct spu *spu, int class, u64 stat) -{ - out_be64(&spu->priv1->int_stat_RW[class], stat); -} - -static u64 int_stat_get(struct spu *spu, int class) -{ - return in_be64(&spu->priv1->int_stat_RW[class]); -} - -static void cpu_affinity_set(struct spu *spu, int cpu) -{ - u64 target; - u64 route; - - if (nr_cpus_node(spu->node)) { - const struct cpumask *spumask = cpumask_of_node(spu->node), - *cpumask = cpumask_of_node(cpu_to_node(cpu)); - - if (!cpumask_intersects(spumask, cpumask)) - return; - } - - target = iic_get_target_id(cpu); - route = target << 48 | target << 32 | target << 16; - out_be64(&spu->priv1->int_route_RW, route); -} - -static u64 mfc_dar_get(struct spu *spu) -{ - return in_be64(&spu->priv1->mfc_dar_RW); -} - -static u64 mfc_dsisr_get(struct spu *spu) -{ - return in_be64(&spu->priv1->mfc_dsisr_RW); -} - -static void mfc_dsisr_set(struct spu *spu, u64 dsisr) -{ - out_be64(&spu->priv1->mfc_dsisr_RW, dsisr); -} - -static void mfc_sdr_setup(struct spu *spu) -{ - out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1)); -} - -static void mfc_sr1_set(struct spu *spu, u64 sr1) -{ - out_be64(&spu->priv1->mfc_sr1_RW, sr1); -} - -static u64 mfc_sr1_get(struct spu *spu) -{ - return in_be64(&spu->priv1->mfc_sr1_RW); -} - -static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id) -{ - out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id); -} - -static u64 mfc_tclass_id_get(struct spu *spu) -{ - return in_be64(&spu->priv1->mfc_tclass_id_RW); -} - -static void tlb_invalidate(struct spu *spu) -{ - out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul); -} - -static void resource_allocation_groupID_set(struct spu *spu, u64 id) -{ - out_be64(&spu->priv1->resource_allocation_groupID_RW, id); -} - -static u64 resource_allocation_groupID_get(struct spu *spu) -{ - return in_be64(&spu->priv1->resource_allocation_groupID_RW); -} - -static void resource_allocation_enable_set(struct spu *spu, u64 enable) -{ - out_be64(&spu->priv1->resource_allocation_enable_RW, enable); -} - -static u64 resource_allocation_enable_get(struct spu *spu) -{ - return in_be64(&spu->priv1->resource_allocation_enable_RW); -} - -const struct spu_priv1_ops spu_priv1_mmio_ops = -{ - .int_mask_and = int_mask_and, - .int_mask_or = int_mask_or, - .int_mask_set = int_mask_set, - .int_mask_get = int_mask_get, - .int_stat_clear = int_stat_clear, - .int_stat_get = int_stat_get, - .cpu_affinity_set = cpu_affinity_set, - .mfc_dar_get = mfc_dar_get, - .mfc_dsisr_get = mfc_dsisr_get, - .mfc_dsisr_set = 
mfc_dsisr_set, - .mfc_sdr_setup = mfc_sdr_setup, - .mfc_sr1_set = mfc_sr1_set, - .mfc_sr1_get = mfc_sr1_get, - .mfc_tclass_id_set = mfc_tclass_id_set, - .mfc_tclass_id_get = mfc_tclass_id_get, - .tlb_invalidate = tlb_invalidate, - .resource_allocation_groupID_set = resource_allocation_groupID_set, - .resource_allocation_groupID_get = resource_allocation_groupID_get, - .resource_allocation_enable_set = resource_allocation_enable_set, - .resource_allocation_enable_get = resource_allocation_enable_get, -}; diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.h b/arch/powerpc/platforms/cell/spu_priv1_mmio.h deleted file mode 100644 index 04f0db339dc1..000000000000 --- a/arch/powerpc/platforms/cell/spu_priv1_mmio.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * spu hypervisor abstraction for direct hardware access. - * - * Copyright (C) 2006 Sony Computer Entertainment Inc. - * Copyright 2006 Sony Corp. - */ - -#ifndef SPU_PRIV1_MMIO_H -#define SPU_PRIV1_MMIO_H - -struct device_node *spu_devnode(struct spu *spu); - -#endif /* SPU_PRIV1_MMIO_H */ diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c index 87ad7d563cfa..000894e07b02 100644 --- a/arch/powerpc/platforms/cell/spu_syscalls.c +++ b/arch/powerpc/platforms/cell/spu_syscalls.c @@ -36,6 +36,9 @@ static inline struct spufs_calls *spufs_calls_get(void) static inline void spufs_calls_put(struct spufs_calls *calls) { + if (!calls) + return; + BUG_ON(calls != spufs_calls); /* we don't need to rcu this, as we hold a reference to the module */ @@ -53,82 +56,55 @@ static inline void spufs_calls_put(struct spufs_calls *calls) { } #endif /* CONFIG_SPU_FS_MODULE */ +DEFINE_CLASS(spufs_calls, struct spufs_calls *, spufs_calls_put(_T), spufs_calls_get(), void) + SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags, umode_t, mode, int, neighbor_fd) { - long ret; - struct spufs_calls *calls; - - calls = spufs_calls_get(); + CLASS(spufs_calls, calls)(); if (!calls) return -ENOSYS; if (flags & SPU_CREATE_AFFINITY_SPU) { - struct fd neighbor = fdget(neighbor_fd); - ret = -EBADF; - if (neighbor.file) { - ret = calls->create_thread(name, flags, mode, neighbor.file); - fdput(neighbor); - } - } else - ret = calls->create_thread(name, flags, mode, NULL); - - spufs_calls_put(calls); - return ret; + CLASS(fd, neighbor)(neighbor_fd); + if (fd_empty(neighbor)) + return -EBADF; + return calls->create_thread(name, flags, mode, fd_file(neighbor)); + } else { + return calls->create_thread(name, flags, mode, NULL); + } } SYSCALL_DEFINE3(spu_run,int, fd, __u32 __user *, unpc, __u32 __user *, ustatus) { - long ret; - struct fd arg; - struct spufs_calls *calls; - - calls = spufs_calls_get(); + CLASS(spufs_calls, calls)(); if (!calls) return -ENOSYS; - ret = -EBADF; - arg = fdget(fd); - if (arg.file) { - ret = calls->spu_run(arg.file, unpc, ustatus); - fdput(arg); - } + CLASS(fd, arg)(fd); + if (fd_empty(arg)) + return -EBADF; - spufs_calls_put(calls); - return ret; + return calls->spu_run(fd_file(arg), unpc, ustatus); } #ifdef CONFIG_COREDUMP int elf_coredump_extra_notes_size(void) { - struct spufs_calls *calls; - int ret; - - calls = spufs_calls_get(); + CLASS(spufs_calls, calls)(); if (!calls) return 0; - ret = calls->coredump_extra_notes_size(); - - spufs_calls_put(calls); - - return ret; + return calls->coredump_extra_notes_size(); } int elf_coredump_extra_notes_write(struct coredump_params *cprm) { - struct spufs_calls *calls; - int ret; - - calls = 
spufs_calls_get(); + CLASS(spufs_calls, calls)(); if (!calls) return 0; - ret = calls->coredump_extra_notes_write(cprm); - - spufs_calls_put(calls); - - return ret; + return calls->coredump_extra_notes_write(cprm); } #endif diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c index 1a587618015c..301ee7d8b7df 100644 --- a/arch/powerpc/platforms/cell/spufs/coredump.c +++ b/arch/powerpc/platforms/cell/spufs/coredump.c @@ -66,18 +66,19 @@ static int match_context(const void *v, struct file *file, unsigned fd) */ static struct spu_context *coredump_next_context(int *fd) { - struct spu_context *ctx; + struct spu_context *ctx = NULL; struct file *file; int n = iterate_fd(current->files, *fd, match_context, NULL); if (!n) return NULL; *fd = n - 1; - rcu_read_lock(); - file = lookup_fd_rcu(*fd); - ctx = SPUFS_I(file_inode(file))->i_ctx; - get_spu_context(ctx); - rcu_read_unlock(); + file = fget_raw(*fd); + if (file) { + ctx = SPUFS_I(file_inode(file))->i_ctx; + get_spu_context(ctx); + fput(file); + } return ctx; } diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 62d90a5e23d1..d5a2c77bc908 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -291,7 +291,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; - vma->vm_flags |= VM_IO | VM_PFNMAP; + vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); vma->vm_ops = &spufs_mem_mmap_vmops; @@ -381,7 +381,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma) if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; - vma->vm_flags |= VM_IO | VM_PFNMAP; + vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_cntl_mmap_vmops; @@ -453,7 +453,6 @@ static const struct file_operations spufs_cntl_fops = { .release = spufs_cntl_release, .read = simple_attr_read, .write = simple_attr_write, - .llseek = no_llseek, .mmap = spufs_cntl_mmap, }; @@ -634,7 +633,6 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf, static const struct file_operations spufs_mbox_fops = { .open = spufs_pipe_open, .read = spufs_mbox_read, - .llseek = no_llseek, }; static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, @@ -664,7 +662,6 @@ static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, static const struct file_operations spufs_mbox_stat_fops = { .open = spufs_pipe_open, .read = spufs_mbox_stat_read, - .llseek = no_llseek, }; /* low-level ibox access function */ @@ -769,7 +766,6 @@ static const struct file_operations spufs_ibox_fops = { .open = spufs_pipe_open, .read = spufs_ibox_read, .poll = spufs_ibox_poll, - .llseek = no_llseek, }; static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, @@ -797,7 +793,6 @@ static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, static const struct file_operations spufs_ibox_stat_fops = { .open = spufs_pipe_open, .read = spufs_ibox_stat_read, - .llseek = no_llseek, }; /* low-level mailbox write */ @@ -901,7 +896,6 @@ static const struct file_operations spufs_wbox_fops = { .open = spufs_pipe_open, .write = spufs_wbox_write, .poll = spufs_wbox_poll, - .llseek = no_llseek, }; static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, @@ -929,7 +923,6 @@ static ssize_t spufs_wbox_stat_read(struct file 
*file, char __user *buf, static const struct file_operations spufs_wbox_stat_fops = { .open = spufs_pipe_open, .read = spufs_wbox_stat_read, - .llseek = no_llseek, }; static int spufs_signal1_open(struct inode *inode, struct file *file) @@ -1043,7 +1036,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma) if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; - vma->vm_flags |= VM_IO | VM_PFNMAP; + vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_signal1_mmap_vmops; @@ -1056,7 +1049,6 @@ static const struct file_operations spufs_signal1_fops = { .read = spufs_signal1_read, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, - .llseek = no_llseek, }; static const struct file_operations spufs_signal1_nosched_fops = { @@ -1064,7 +1056,6 @@ static const struct file_operations spufs_signal1_nosched_fops = { .release = spufs_signal1_release, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, - .llseek = no_llseek, }; static int spufs_signal2_open(struct inode *inode, struct file *file) @@ -1179,7 +1170,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma) if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; - vma->vm_flags |= VM_IO | VM_PFNMAP; + vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_signal2_mmap_vmops; @@ -1195,7 +1186,6 @@ static const struct file_operations spufs_signal2_fops = { .read = spufs_signal2_read, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, - .llseek = no_llseek, }; static const struct file_operations spufs_signal2_nosched_fops = { @@ -1203,7 +1193,6 @@ static const struct file_operations spufs_signal2_nosched_fops = { .release = spufs_signal2_release, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, - .llseek = no_llseek, }; /* @@ -1302,7 +1291,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma) if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; - vma->vm_flags |= VM_IO | VM_PFNMAP; + vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_mss_mmap_vmops; @@ -1343,7 +1332,6 @@ static const struct file_operations spufs_mss_fops = { .open = spufs_mss_open, .release = spufs_mss_release, .mmap = spufs_mss_mmap, - .llseek = no_llseek, }; static vm_fault_t @@ -1364,7 +1352,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma) if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; - vma->vm_flags |= VM_IO | VM_PFNMAP; + vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_psmap_mmap_vmops; @@ -1401,7 +1389,6 @@ static const struct file_operations spufs_psmap_fops = { .open = spufs_psmap_open, .release = spufs_psmap_release, .mmap = spufs_psmap_mmap, - .llseek = no_llseek, }; @@ -1424,7 +1411,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma) if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; - vma->vm_flags |= VM_IO | VM_PFNMAP; + vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_mfc_mmap_vmops; @@ -1704,23 +1691,11 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id) ret = spu_acquire(ctx); if (ret) - goto out; -#if 0 -/* this currently hangs */ - ret = spufs_wait(ctx->mfc_wq, - ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2)); - if (ret) - goto out; - ret = spufs_wait(ctx->mfc_wq, - 
ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait); - if (ret) - goto out; -#else - ret = 0; -#endif + return ret; + spu_release(ctx); -out: - return ret; + + return 0; } static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) @@ -1744,7 +1719,6 @@ static const struct file_operations spufs_mfc_fops = { .flush = spufs_mfc_flush, .fsync = spufs_mfc_fsync, .mmap = spufs_mfc_mmap, - .llseek = no_llseek, }; static int spufs_npc_set(void *data, u64 val) @@ -2114,7 +2088,6 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, static const struct file_operations spufs_dma_info_fops = { .open = spufs_info_open, .read = spufs_dma_info_read, - .llseek = no_llseek, }; static void spufs_get_proxydma_info(struct spu_context *ctx, @@ -2171,7 +2144,6 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, static const struct file_operations spufs_proxydma_info_fops = { .open = spufs_info_open, .read = spufs_proxydma_info_read, - .llseek = no_llseek, }; static int spufs_show_tid(struct seq_file *s, void *private) @@ -2454,7 +2426,6 @@ static const struct file_operations spufs_switch_log_fops = { .read = spufs_switch_log_read, .poll = spufs_switch_log_poll, .release = spufs_switch_log_release, - .llseek = no_llseek, }; /** diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c index 827d338deaf4..2c2999de6bfa 100644 --- a/arch/powerpc/platforms/cell/spufs/gang.c +++ b/arch/powerpc/platforms/cell/spufs/gang.c @@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void) mutex_init(&gang->aff_mutex); INIT_LIST_HEAD(&gang->list); INIT_LIST_HEAD(&gang->aff_list_head); + gang->alive = 1; out: return gang; diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 34334c32b7f5..9f9e4b871627 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -86,13 +86,13 @@ spufs_new_inode(struct super_block *sb, umode_t mode) inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); - inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); + simple_inode_init_ts(inode); out: return inode; } static int -spufs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, +spufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); @@ -100,7 +100,7 @@ spufs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, if ((attr->ia_valid & ATTR_SIZE) && (attr->ia_size != inode->i_size)) return -EINVAL; - setattr_copy(&init_user_ns, inode, attr); + setattr_copy(&nop_mnt_idmap, inode, attr); mark_inode_dirty(inode); return 0; } @@ -145,10 +145,11 @@ spufs_evict_inode(struct inode *inode) static void spufs_prune_dir(struct dentry *dir) { - struct dentry *dentry, *tmp; + struct dentry *dentry; + struct hlist_node *n; inode_lock(d_inode(dir)); - list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) { + hlist_for_each_entry_safe(dentry, n, &dir->d_children, d_sib) { spin_lock(&dentry->d_lock); if (simple_positive(dentry)) { dget_dlock(dentry); @@ -191,13 +192,32 @@ static int spufs_fill_dir(struct dentry *dir, return -ENOMEM; ret = spufs_new_file(dir->d_sb, dentry, files->ops, files->mode & mode, files->size, ctx); - if (ret) + if (ret) { + dput(dentry); return ret; + } files++; } return 0; } +static void unuse_gang(struct dentry *dir) +{ + struct inode *inode = dir->d_inode; + struct spu_gang *gang = 
SPUFS_I(inode)->i_gang; + + if (gang) { + bool dead; + + inode_lock(inode); // exclusion with spufs_create_context() + dead = !--gang->alive; + inode_unlock(inode); + + if (dead) + simple_recursive_removal(dir, NULL); + } +} + static int spufs_dir_close(struct inode *inode, struct file *file) { struct inode *parent; @@ -212,6 +232,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file) inode_unlock(parent); WARN_ON(ret); + unuse_gang(dir->d_parent); return dcache_dir_close(inode, file); } @@ -237,7 +258,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, if (!inode) return -ENOSPC; - inode_init_owner(&init_user_ns, inode, dir, mode | S_IFDIR); + inode_init_owner(&nop_mnt_idmap, inode, dir, mode | S_IFDIR); ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */ SPUFS_I(inode)->i_ctx = ctx; if (!ctx) { @@ -275,7 +296,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, return ret; } -static int spufs_context_open(struct path *path) +static int spufs_context_open(const struct path *path) { int ret; struct file *filp; @@ -404,7 +425,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, { int ret; int affinity; - struct spu_gang *gang; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; struct spu_context *neighbor; struct path path = {.mnt = mnt, .dentry = dentry}; @@ -419,11 +440,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) return -ENODEV; - gang = NULL; + if (gang) { + if (!gang->alive) + return -ENOENT; + gang->alive++; + } + neighbor = NULL; affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); if (affinity) { - gang = SPUFS_I(inode)->i_gang; if (!gang) return -EINVAL; mutex_lock(&gang->aff_mutex); @@ -435,8 +460,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, } ret = spufs_mkdir(inode, dentry, flags, mode & 0777); - if (ret) + if (ret) { + if (neighbor) + put_spu_context(neighbor); goto out_aff_unlock; + } if (affinity) { spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx, @@ -452,6 +480,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, out_aff_unlock: if (affinity) mutex_unlock(&gang->aff_mutex); + if (ret && gang) + gang->alive--; // can't reach 0 return ret; } @@ -468,7 +498,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) goto out; ret = 0; - inode_init_owner(&init_user_ns, inode, dir, mode | S_IFDIR); + inode_init_owner(&nop_mnt_idmap, inode, dir, mode | S_IFDIR); gang = alloc_spu_gang(); SPUFS_I(inode)->i_ctx = NULL; SPUFS_I(inode)->i_gang = gang; @@ -481,6 +511,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) inode->i_fop = &simple_dir_operations; d_instantiate(dentry, inode); + dget(dentry); inc_nlink(dir); inc_nlink(d_inode(dentry)); return ret; @@ -491,7 +522,22 @@ out: return ret; } -static int spufs_gang_open(struct path *path) +static int spufs_gang_close(struct inode *inode, struct file *file) +{ + unuse_gang(file->f_path.dentry); + return dcache_dir_close(inode, file); +} + +static const struct file_operations spufs_gang_fops = { + .open = dcache_dir_open, + .release = spufs_gang_close, + .llseek = dcache_dir_lseek, + .read = generic_read_dir, + .iterate_shared = dcache_readdir, + .fsync = noop_fsync, +}; + +static int spufs_gang_open(const struct path *path) { int ret; struct file *filp; @@ -510,7 +556,7 @@ static int spufs_gang_open(struct path *path) return PTR_ERR(filp); } - filp->f_op = 
&simple_dir_operations; + filp->f_op = &spufs_gang_fops; fd_install(ret, filp); return ret; } @@ -525,10 +571,8 @@ static int spufs_create_gang(struct inode *inode, ret = spufs_mkgang(inode, dentry, mode & 0777); if (!ret) { ret = spufs_gang_open(&path); - if (ret < 0) { - int err = simple_rmdir(inode, dentry); - WARN_ON(err); - } + if (ret < 0) + unuse_gang(dentry); } return ret; } @@ -536,7 +580,7 @@ static int spufs_create_gang(struct inode *inode, static struct file_system_type spufs_type; -long spufs_create(struct path *path, struct dentry *dentry, +long spufs_create(const struct path *path, struct dentry *dentry, unsigned int flags, umode_t mode, struct file *filp) { struct inode *dir = d_inode(path->dentry); @@ -660,6 +704,7 @@ spufs_init_isolated_loader(void) return; loader = of_get_property(dn, "loader", &size); + of_node_put(dn); if (!loader) return; @@ -820,6 +865,7 @@ static void __exit spufs_exit(void) } module_exit(spufs_exit); +MODULE_DESCRIPTION("SPU file system"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 99bd027a7f7c..8e7ed010bfde 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -508,7 +508,7 @@ static void __spu_del_from_rq(struct spu_context *ctx) if (!list_empty(&ctx->rq)) { if (!--spu_prio->nr_waiting) - del_timer(&spusched_timer); + timer_delete(&spusched_timer); list_del_init(&ctx->rq); if (list_empty(&spu_prio->runq[prio])) @@ -868,7 +868,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio) } /** - * spu_deactivate - unbind a context from it's physical spu + * spu_deactivate - unbind a context from its physical spu * @ctx: spu context to unbind * * Unbind @ctx from the physical spu it is running on and schedule @@ -1126,8 +1126,8 @@ void spu_sched_exit(void) remove_proc_entry("spu_loadavg", NULL); - del_timer_sync(&spusched_timer); - del_timer_sync(&spuloadavg_timer); + timer_delete_sync(&spusched_timer); + timer_delete_sync(&spuloadavg_timer); kthread_stop(spusched_task); for (node = 0; node < MAX_NUMNODES; node++) { diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index afc1d6604d12..d33787c57c39 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -76,7 +76,7 @@ struct spu_context { struct address_space *mss; /* 'mss' area mappings. */ struct address_space *psmap; /* 'psmap' area mappings. 
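
The spufs hunks above (together with the alive field added to struct spu_gang just below) give each gang a small lifetime count: spufs_create_context() refuses to join a gang whose count has already dropped to zero and otherwise increments it, while unuse_gang() decrements it and removes the whole gang directory once the last user is gone. The count is only touched with the gang directory's inode lock held (the create path already owns it; unuse_gang() takes it explicitly), which is what the "exclusion with spufs_create_context()" comment refers to. A much-reduced sketch of that protocol, using hypothetical helper names:

#include <linux/fs.h>

/* Hypothetical reduction of the alive-count idea: both sides serialize
 * on inode_lock() of the gang directory, so a creator can never race
 * with the final close that tears the directory down.
 */
static bool example_gang_get(struct inode *dir, int *alive)
{
	bool ok;

	inode_lock(dir);
	ok = *alive > 0;		/* refuse to join a dying gang */
	if (ok)
		(*alive)++;
	inode_unlock(dir);

	return ok;
}

static void example_gang_put(struct inode *dir, struct dentry *root, int *alive)
{
	bool dead;

	inode_lock(dir);
	dead = --(*alive) == 0;
	inode_unlock(dir);

	if (dead)
		simple_recursive_removal(root, NULL);
}

The error path in spufs_create_context() can simply decrement without re-checking, because its own reference guarantees the count cannot have reached zero in the meantime (the "can't reach 0" comment).
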
*/ struct mutex mapping_lock; - u64 object_id; /* user space pointer for oprofile */ + u64 object_id; /* user space pointer for GNU Debugger */ enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; struct mutex state_mutex; @@ -151,6 +151,8 @@ struct spu_gang { int aff_flags; struct spu *aff_ref_spu; atomic_t aff_sched_count; + + int alive; }; /* Flag bits for spu_gang aff_flags */ @@ -232,7 +234,7 @@ extern const struct spufs_tree_descr spufs_dir_debug_contents[]; extern struct spufs_calls spufs_calls; struct coredump_params; long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status); -long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags, +long spufs_create(const struct path *nd, struct dentry *dentry, unsigned int flags, umode_t mode, struct file *filp); /* ELF coredump callbacks for writing SPU ELF notes */ extern int spufs_coredump_extra_notes_size(void); @@ -333,7 +335,6 @@ void spufs_stop_callback(struct spu *spu, int irq); void spufs_mfc_callback(struct spu *spu); void spufs_dma_callback(struct spu *spu, int type); -extern struct spu_coredump_calls spufs_coredump_calls; struct spufs_coredump_reader { char *name; ssize_t (*dump)(struct spu_context *ctx, struct coredump_params *cprm); @@ -341,7 +342,6 @@ struct spufs_coredump_reader { size_t size; }; extern const struct spufs_coredump_reader spufs_coredump_read[]; -extern int spufs_coredump_num_notes; extern int spu_init_csa(struct spu_state *csa); extern void spu_fini_csa(struct spu_state *csa); diff --git a/arch/powerpc/platforms/chrp/chrp.h b/arch/powerpc/platforms/chrp/chrp.h index a5a7c338caf9..6ff4631d9db4 100644 --- a/arch/powerpc/platforms/chrp/chrp.h +++ b/arch/powerpc/platforms/chrp/chrp.h @@ -9,4 +9,3 @@ extern int chrp_set_rtc_time(struct rtc_time *); extern long chrp_time_init(void); extern void chrp_find_bridges(void); -extern void chrp_event_scan(unsigned long); diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c index dab78076fedb..d3bf56a46656 100644 --- a/arch/powerpc/platforms/chrp/nvram.c +++ b/arch/powerpc/platforms/chrp/nvram.c @@ -31,7 +31,7 @@ static unsigned char chrp_nvram_read_val(int addr) return 0xff; } spin_lock_irqsave(&nvram_lock, flags); - if ((rtas_call(rtas_token("nvram-fetch"), 3, 2, &done, addr, + if ((rtas_call(rtas_function_token(RTAS_FN_NVRAM_FETCH), 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done) ret = 0xff; else @@ -53,7 +53,7 @@ static void chrp_nvram_write_val(int addr, unsigned char val) } spin_lock_irqsave(&nvram_lock, flags); nvram_buf[0] = val; - if ((rtas_call(rtas_token("nvram-store"), 3, 2, &done, addr, + if ((rtas_call(rtas_function_token(RTAS_FN_NVRAM_STORE), 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done) printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr); spin_unlock_irqrestore(&nvram_lock, flags); @@ -92,4 +92,5 @@ void __init chrp_nvram_init(void) return; } +MODULE_DESCRIPTION("PPC NVRAM device driver"); MODULE_LICENSE("GPL v2"); diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index 6f6598e771ff..428fd2a7b3ee 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c @@ -104,7 +104,7 @@ static int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int ret = -1; int rval; - rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len); + rval = rtas_call(rtas_function_token(RTAS_FN_READ_PCI_CONFIG), 2, 2, &ret, addr, len); *val = ret; return rval? 
PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; } @@ -118,7 +118,7 @@ static int rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset | (hose->global_number << 24); int rval; - rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL, + rval = rtas_call(rtas_function_token(RTAS_FN_WRITE_PCI_CONFIG), 3, 1, NULL, addr, len, val); return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; } diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c index 5c4f1a9ca154..6f4a41a9352a 100644 --- a/arch/powerpc/platforms/chrp/pegasos_eth.c +++ b/arch/powerpc/platforms/chrp/pegasos_eth.c @@ -14,7 +14,7 @@ #include <linux/ioport.h> #include <linux/device.h> #include <linux/platform_device.h> -#include <linux/mv643xx.h> +#include <linux/mv643xx_eth.h> #include <linux/pci.h> #define PEGASOS2_MARVELL_REGBASE (0xf1000000) @@ -25,12 +25,15 @@ #define PEGASOS2_SRAM_BASE_ETH_PORT0 (PEGASOS2_SRAM_BASE) #define PEGASOS2_SRAM_BASE_ETH_PORT1 (PEGASOS2_SRAM_BASE_ETH_PORT0 + (PEGASOS2_SRAM_SIZE / 2) ) - #define PEGASOS2_SRAM_RXRING_SIZE (PEGASOS2_SRAM_SIZE/4) #define PEGASOS2_SRAM_TXRING_SIZE (PEGASOS2_SRAM_SIZE/4) #undef BE_VERBOSE +#define MV64340_BASE_ADDR_ENABLE 0x278 +#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268 +#define MV64340_SRAM_CONFIG 0x380 + static struct resource mv643xx_eth_shared_resources[] = { [0] = { .name = "ethernet shared base", diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index ec63c0558db6..c1bfa4c3444c 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -323,11 +323,11 @@ static void __init chrp_setup_arch(void) printk("chrp type = %x [%s]\n", _chrp_type, chrp_names[_chrp_type]); rtas_initialize(); - if (rtas_token("display-character") >= 0) + if (rtas_function_token(RTAS_FN_DISPLAY_CHARACTER) >= 0) ppc_md.progress = rtas_progress; /* use RTAS time-of-day routines if available */ - if (rtas_token("get-time-of-day") != RTAS_UNKNOWN_SERVICE) { + if (rtas_function_token(RTAS_FN_GET_TIME_OF_DAY) != RTAS_UNKNOWN_SERVICE) { ppc_md.get_boot_time = rtas_get_boot_time; ppc_md.get_rtc_time = rtas_get_rtc_time; ppc_md.set_rtc_time = rtas_set_rtc_time; @@ -486,7 +486,7 @@ static void __init chrp_find_8259(void) i8259_init(pic, chrp_int_ack); if (ppc_md.get_irq == NULL) { ppc_md.get_irq = i8259_irq; - irq_set_default_host(i8259_get_host()); + irq_set_default_domain(i8259_get_host()); } if (chrp_mpic != NULL) { cascade_irq = irq_of_parse_and_map(pic, 0); @@ -582,6 +582,5 @@ define_machine(chrp) { .time_init = chrp_time_init, .set_rtc_time = chrp_set_rtc_time, .get_rtc_time = chrp_get_rtc_time, - .calibrate_decr = generic_calibrate_decr, .phys_mem_access_prot = pci_phys_mem_access_prot, }; diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig index c54786f8461e..c6adff216fe6 100644 --- a/arch/powerpc/platforms/embedded6xx/Kconfig +++ b/arch/powerpc/platforms/embedded6xx/Kconfig @@ -10,7 +10,7 @@ config LINKSTATION select FSL_SOC select PPC_UDBG_16550 if SERIAL_8250 select DEFAULT_UIMAGE - select MPC10X_BRIDGE + imply MPC10X_BRIDGE if PCI help Select LINKSTATION if configuring for one of PPC- (MPC8241) based NAS systems from Buffalo Technology. So far only @@ -24,21 +24,11 @@ config STORCENTER select MPIC select FSL_SOC select PPC_UDBG_16550 if SERIAL_8250 - select MPC10X_BRIDGE + imply MPC10X_BRIDGE if PCI help Select STORCENTER if configuring for the iomega StorCenter with an 8241 CPU in it. 
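
The chrp nvram and PCI config hunks above replace string-based rtas_token("...") lookups with rtas_function_token() and the corresponding RTAS_FN_* handles, so a mistyped function name is caught at compile time instead of failing silently at runtime. A hedged sketch of the calling pattern, using a hypothetical helper; the buffer handed to RTAS must be addressable by firmware (the real driver uses a static buffer under a spinlock):

#include <asm/page.h>
#include <asm/rtas.h>

/* Hypothetical example: fetch one byte of NVRAM via RTAS. The token is
 * resolved from the RTAS_FN_* handle rather than from a "nvram-fetch"
 * string lookup, as in the chrp hunks above.
 */
static unsigned char example_nvram_read(int addr, unsigned char *buf)
{
	int done;

	if (rtas_call(rtas_function_token(RTAS_FN_NVRAM_FETCH), 3, 2, &done,
		      addr, __pa(buf), 1) != 0 || done != 1)
		return 0xff;

	return buf[0];
}
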
-config MPC7448HPC2 - bool "Freescale MPC7448HPC2(Taiga)" - depends on EMBEDDED6xx - select TSI108_BRIDGE - select DEFAULT_UIMAGE - select PPC_UDBG_16550 - help - Select MPC7448HPC2 if configuring for Freescale MPC7448HPC2 (Taiga) - platform - config PPC_HOLLY bool "PPC750GX/CL with TSI10x bridge (Hickory/Holly)" depends on EMBEDDED6xx diff --git a/arch/powerpc/platforms/embedded6xx/Makefile b/arch/powerpc/platforms/embedded6xx/Makefile index e656ae9f23c6..7f2a8154e5a0 100644 --- a/arch/powerpc/platforms/embedded6xx/Makefile +++ b/arch/powerpc/platforms/embedded6xx/Makefile @@ -2,7 +2,6 @@ # # Makefile for the 6xx/7xx/7xxxx linux kernel. # -obj-$(CONFIG_MPC7448HPC2) += mpc7448_hpc2.o obj-$(CONFIG_LINKSTATION) += linkstation.o ls_uart.o obj-$(CONFIG_STORCENTER) += storcenter.o obj-$(CONFIG_PPC_HOLLY) += holly.o diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index 609bda2ad5dd..91a8f0a7086e 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c @@ -145,12 +145,13 @@ static struct irq_domain * __init flipper_pic_init(struct device_node *np) } io_base = ioremap(res.start, resource_size(&res)); - pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base); + pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base); __flipper_quiesce(io_base); - irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS, - &flipper_irq_domain_ops, io_base); + irq_domain = irq_domain_create_linear(of_fwnode_handle(np), + FLIPPER_NR_IRQS, + &flipper_irq_domain_ops, io_base); if (!irq_domain) { pr_err("failed to allocate irq_domain\n"); return NULL; @@ -172,7 +173,7 @@ unsigned int flipper_pic_get_irq(void) return 0; /* no more IRQs pending */ irq = __ffs(irq_status); - return irq_linear_revmap(flipper_irq_host, irq); + return irq_find_mapping(flipper_irq_host, irq); } /* @@ -190,7 +191,7 @@ void __init flipper_pic_probe(void) flipper_irq_host = flipper_pic_init(np); BUG_ON(!flipper_irq_host); - irq_set_default_host(flipper_irq_host); + irq_set_default_domain(flipper_irq_host); of_node_put(np); } diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c index 5c2575adcc7e..e3b2c7464732 100644 --- a/arch/powerpc/platforms/embedded6xx/gamecube.c +++ b/arch/powerpc/platforms/embedded6xx/gamecube.c @@ -50,9 +50,6 @@ static void __noreturn gamecube_halt(void) static int __init gamecube_probe(void) { - if (!of_machine_is_compatible("nintendo,gamecube")) - return 0; - pm_power_off = gamecube_power_off; ug_udbg_init(); @@ -67,12 +64,12 @@ static void gamecube_shutdown(void) define_machine(gamecube) { .name = "gamecube", + .compatible = "nintendo,gamecube", .probe = gamecube_probe, .restart = gamecube_restart, .halt = gamecube_halt, .init_IRQ = flipper_pic_probe, .get_irq = flipper_pic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, .machine_shutdown = gamecube_shutdown, }; @@ -85,11 +82,8 @@ static const struct of_device_id gamecube_of_bus[] = { static int __init gamecube_device_probe(void) { - if (!machine_is(gamecube)) - return 0; - of_platform_bus_probe(NULL, gamecube_of_bus, NULL); return 0; } -device_initcall(gamecube_device_probe); +machine_device_initcall(gamecube, gamecube_device_probe); diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 380b4285cce4..b57e87b0b3ce 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ 
b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -171,12 +171,13 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np) return NULL; } - pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base); + pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base); __hlwd_quiesce(io_base); - irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS, - &hlwd_irq_domain_ops, io_base); + irq_domain = irq_domain_create_linear(of_fwnode_handle(np), + HLWD_NR_IRQS, + &hlwd_irq_domain_ops, io_base); if (!irq_domain) { pr_err("failed to allocate irq_domain\n"); iounmap(io_base); @@ -189,7 +190,7 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np) unsigned int hlwd_pic_get_irq(void) { unsigned int hwirq = __hlwd_pic_get_irq(hlwd_irq_host); - return hwirq ? irq_linear_revmap(hlwd_irq_host, hwirq) : 0; + return hwirq ? irq_find_mapping(hlwd_irq_host, hwirq) : 0; } /* diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c index 78f2378d9223..ce9e58ee9754 100644 --- a/arch/powerpc/platforms/embedded6xx/holly.c +++ b/arch/powerpc/platforms/embedded6xx/holly.c @@ -22,9 +22,9 @@ #include <linux/serial.h> #include <linux/tty.h> #include <linux/serial_core.h> +#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> #include <linux/extable.h> #include <asm/time.h> @@ -123,6 +123,8 @@ static void __init holly_init_pci(void) if (np) tsi108_setup_pci(np, HOLLY_PCI_CFG_PHYS, 1); + of_node_put(np); + ppc_md.pci_exclude_device = holly_exclude_device; if (ppc_md.progress) ppc_md.progress("tsi108: resources set", 0x100); @@ -184,6 +186,9 @@ static void __init holly_init_IRQ(void) tsi108_pci_int_init(cascade_node); irq_set_handler_data(cascade_pci_irq, mpic); irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); + + of_node_put(tsi_pci); + of_node_put(cascade_node); #endif /* Configure MPIC outputs to CPU0 */ tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); @@ -200,16 +205,16 @@ static void __noreturn holly_restart(char *cmd) __be32 __iomem *ocn_bar1 = NULL; unsigned long bar; struct device_node *bridge = NULL; - const void *prop; - int size; + struct resource res; phys_addr_t addr = 0xc0000000; local_irq_disable(); bridge = of_find_node_by_type(NULL, "tsi-bridge"); if (bridge) { - prop = of_get_property(bridge, "reg", &size); - addr = of_translate_address(bridge, prop); + of_address_to_resource(bridge, 0, &res); + addr = res.start; + of_node_put(bridge); } addr += (TSI108_PB_OFFSET + 0x414); @@ -235,16 +240,6 @@ static void __noreturn holly_restart(char *cmd) for (;;) ; } -/* - * Called very early, device-tree isn't unflattened - */ -static int __init holly_probe(void) -{ - if (!of_machine_is_compatible("ibm,holly")) - return 0; - return 1; -} - static int ppc750_machine_check_exception(struct pt_regs *regs) { const struct exception_table_entry *entry; @@ -261,14 +256,13 @@ static int ppc750_machine_check_exception(struct pt_regs *regs) define_machine(holly){ .name = "PPC750 GX/CL TSI", - .probe = holly_probe, + .compatible = "ibm,holly", .setup_arch = holly_setup_arch, .discover_phbs = holly_init_pci, .init_IRQ = holly_init_IRQ, .show_cpuinfo = holly_show_cpuinfo, .get_irq = mpic_get_irq, .restart = holly_restart, - .calibrate_decr = generic_calibrate_decr, .machine_check_exception = ppc750_machine_check_exception, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c 
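
The flipper-pic and hlwd-pic hunks above move from the legacy irq_domain_add_linear()/irq_linear_revmap() pair to irq_domain_create_linear() keyed on the node's fwnode handle, with irq_find_mapping() doing the hardware-to-virtual translation. A minimal sketch of the new-style setup, with hypothetical names and an assumed IRQ count:

#include <linux/irqdomain.h>
#include <linux/of.h>

#define EXAMPLE_NR_IRQS	32	/* hypothetical */

static struct irq_domain *example_domain;

/* Hypothetical PIC setup: create a linear domain on the DT node's
 * fwnode handle; hardware IRQ numbers are later translated with
 * irq_find_mapping() instead of irq_linear_revmap().
 */
static int __init example_pic_init(struct device_node *np,
				   const struct irq_domain_ops *ops,
				   void *host_data)
{
	example_domain = irq_domain_create_linear(of_fwnode_handle(np),
						  EXAMPLE_NR_IRQS, ops,
						  host_data);
	return example_domain ? 0 : -ENOMEM;
}

static unsigned int example_pic_get_irq(unsigned int hwirq)
{
	return irq_find_mapping(example_domain, hwirq);
}

The %pa printk change in the same hunks is independent: res.start is a resource_size_t, so it is printed through a pointer with %pa rather than being assumed to fit in 32 bits.
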
index 1830e1ac1f8f..4012f206ec63 100644 --- a/arch/powerpc/platforms/embedded6xx/linkstation.c +++ b/arch/powerpc/platforms/embedded6xx/linkstation.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/initrd.h> #include <linux/of_platform.h> +#include <linux/seq_file.h> #include <asm/time.h> #include <asm/mpic.h> @@ -99,9 +100,6 @@ static void __init linkstation_init_IRQ(void) mpic_init(mpic); } -extern void avr_uart_configure(void); -extern void avr_uart_send(const char); - static void __noreturn linkstation_restart(char *cmd) { local_irq_disable(); @@ -143,9 +141,6 @@ static void linkstation_show_cpuinfo(struct seq_file *m) static int __init linkstation_probe(void) { - if (!of_machine_is_compatible("linkstation")) - return 0; - pm_power_off = linkstation_power_off; return 1; @@ -153,6 +148,7 @@ static int __init linkstation_probe(void) define_machine(linkstation){ .name = "Buffalo Linkstation", + .compatible = "linkstation", .probe = linkstation_probe, .setup_arch = linkstation_setup_arch, .discover_phbs = linkstation_setup_pci, @@ -161,5 +157,4 @@ define_machine(linkstation){ .get_irq = mpic_get_irq, .restart = linkstation_restart, .halt = linkstation_halt, - .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c index 0133e175a0fc..6c1dbf8ae718 100644 --- a/arch/powerpc/platforms/embedded6xx/ls_uart.c +++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c @@ -15,6 +15,7 @@ #include <linux/serial_reg.h> #include <linux/serial_8250.h> #include <linux/of.h> +#include <linux/of_address.h> #include <asm/io.h> #include <asm/termbits.h> @@ -114,20 +115,24 @@ static void __init ls_uart_init(void) static int __init ls_uarts_init(void) { struct device_node *avr; - phys_addr_t phys_addr; - int len; + struct resource res; + int len, ret; avr = of_find_node_by_path("/soc10x/serial@80004500"); if (!avr) return -EINVAL; avr_clock = *(u32*)of_get_property(avr, "clock-frequency", &len); - phys_addr = ((u32*)of_get_property(avr, "reg", &len))[0]; - - if (!avr_clock || !phys_addr) + if (!avr_clock) return -EINVAL; - avr_addr = ioremap(phys_addr, 32); + ret = of_address_to_resource(avr, 0, &res); + if (ret) + return ret; + + of_node_put(avr); + + avr_addr = ioremap(res.start, 32); if (!avr_addr) return -EFAULT; diff --git a/arch/powerpc/platforms/embedded6xx/mpc10x.h b/arch/powerpc/platforms/embedded6xx/mpc10x.h index 5ad12023e562..ebc258fa4858 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc10x.h +++ b/arch/powerpc/platforms/embedded6xx/mpc10x.h @@ -156,4 +156,7 @@ int mpc10x_disable_store_gathering(struct pci_controller *hose); /* For MPC107 boards that use the built-in openpic */ void mpc10x_set_openpic(void); +void avr_uart_configure(void); +void avr_uart_send(const char c); + #endif /* __PPC_KERNEL_MPC10X_H */ diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c deleted file mode 100644 index 8b2b42210356..000000000000 --- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ /dev/null @@ -1,195 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * mpc7448_hpc2.c - * - * Board setup routines for the Freescale mpc7448hpc2(taiga) platform - * - * Author: Jacob Pan - * jacob.pan@freescale.com - * Author: Xianghua Xiao - * x.xiao@freescale.com - * Maintainer: Roy Zang <tie-fei.zang@freescale.com> - * Add Flat Device Tree support fot mpc7448hpc2 board - * - * Copyright 2004-2006 Freescale Semiconductor, Inc. 
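
The ls_uart.c hunk above stops hand-parsing the "reg" property and instead asks the OF core for a translated resource; holly_restart() above gets the same treatment. A hedged sketch of the pattern, with a hypothetical node path:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Hypothetical helper: map the first register window of a DT node.
 * of_address_to_resource() handles the address translation and size,
 * so no manual of_get_property("reg")/of_translate_address() is needed.
 */
static void __iomem * __init example_map_node(const char *path)
{
	struct device_node *np;
	struct resource res;
	void __iomem *base = NULL;

	np = of_find_node_by_path(path);
	if (!np)
		return NULL;

	if (!of_address_to_resource(np, 0, &res))
		base = ioremap(res.start, resource_size(&res));

	of_node_put(np);
	return base;
}

Where only the mapping itself is needed, of_iomap(np, 0) condenses the resource lookup and the ioremap() into one call, which is what the usbgecko_udbg change further down does.
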
- */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/console.h> -#include <linux/extable.h> -#include <linux/delay.h> -#include <linux/irq.h> -#include <linux/seq_file.h> -#include <linux/root_dev.h> -#include <linux/serial.h> -#include <linux/tty.h> -#include <linux/serial_core.h> -#include <linux/of_irq.h> - -#include <asm/time.h> -#include <asm/machdep.h> -#include <asm/udbg.h> -#include <asm/tsi108.h> -#include <asm/pci-bridge.h> -#include <asm/reg.h> -#include <mm/mmu_decl.h> -#include <asm/tsi108_pci.h> -#include <asm/tsi108_irq.h> -#include <asm/mpic.h> - -#undef DEBUG -#ifdef DEBUG -#define DBG(fmt...) do { printk(fmt); } while(0) -#else -#define DBG(fmt...) do { } while(0) -#endif - -#define MPC7448HPC2_PCI_CFG_PHYS 0xfb000000 - -int mpc7448_hpc2_exclude_device(struct pci_controller *hose, - u_char bus, u_char devfn) -{ - if (bus == 0 && PCI_SLOT(devfn) == 0) - return PCIBIOS_DEVICE_NOT_FOUND; - else - return PCIBIOS_SUCCESSFUL; -} - -static void __init mpc7448_hpc2_setup_pci(void) -{ -#ifdef CONFIG_PCI - struct device_node *np; - if (ppc_md.progress) - ppc_md.progress("mpc7448_hpc2_setup_pci():set_bridge", 0); - - /* setup PCI host bridge */ - for_each_compatible_node(np, "pci", "tsi108-pci") - tsi108_setup_pci(np, MPC7448HPC2_PCI_CFG_PHYS, 0); - - ppc_md.pci_exclude_device = mpc7448_hpc2_exclude_device; - if (ppc_md.progress) - ppc_md.progress("tsi108: resources set", 0x100); -#endif -} - -static void __init mpc7448_hpc2_setup_arch(void) -{ - tsi108_csr_vir_base = get_vir_csrbase(); - - printk(KERN_INFO "MPC7448HPC2 (TAIGA) Platform\n"); - printk(KERN_INFO - "Jointly ported by Freescale and Tundra Semiconductor\n"); - printk(KERN_INFO - "Enabling L2 cache then enabling the HID0 prefetch engine.\n"); -} - -/* - * Interrupt setup and service. 
Interrupts on the mpc7448_hpc2 come - * from the four external INT pins, PCI interrupts are routed via - * PCI interrupt control registers, it generates internal IRQ23 - * - * Interrupt routing on the Taiga Board: - * TSI108:PB_INT[0] -> CPU0:INT# - * TSI108:PB_INT[1] -> CPU0:MCP# - * TSI108:PB_INT[2] -> N/C - * TSI108:PB_INT[3] -> N/C - */ -static void __init mpc7448_hpc2_init_IRQ(void) -{ - struct mpic *mpic; -#ifdef CONFIG_PCI - unsigned int cascade_pci_irq; - struct device_node *tsi_pci; - struct device_node *cascade_node = NULL; -#endif - - mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | - MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108, - 24, 0, - "Tsi108_PIC"); - - BUG_ON(mpic == NULL); - - mpic_assign_isu(mpic, 0, mpic->paddr + 0x100); - - mpic_init(mpic); - -#ifdef CONFIG_PCI - tsi_pci = of_find_node_by_type(NULL, "pci"); - if (tsi_pci == NULL) { - printk("%s: No tsi108 pci node found !\n", __func__); - return; - } - cascade_node = of_find_node_by_type(NULL, "pic-router"); - if (cascade_node == NULL) { - printk("%s: No tsi108 pci cascade node found !\n", __func__); - return; - } - - cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0); - DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, - (u32) cascade_pci_irq); - tsi108_pci_int_init(cascade_node); - irq_set_handler_data(cascade_pci_irq, mpic); - irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); -#endif - /* Configure MPIC outputs to CPU0 */ - tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); -} - -void mpc7448_hpc2_show_cpuinfo(struct seq_file *m) -{ - seq_printf(m, "vendor\t\t: Freescale Semiconductor\n"); -} - -static void __noreturn mpc7448_hpc2_restart(char *cmd) -{ - local_irq_disable(); - - /* Set exception prefix high - to the firmware */ - mtmsr(mfmsr() | MSR_IP); - isync(); - - for (;;) ; /* Spin until reset happens */ -} - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init mpc7448_hpc2_probe(void) -{ - if (!of_machine_is_compatible("mpc74xx")) - return 0; - return 1; -} - -static int mpc7448_machine_check_exception(struct pt_regs *regs) -{ - const struct exception_table_entry *entry; - - /* Are we prepared to handle this fault */ - if ((entry = search_exception_tables(regs->nip)) != NULL) { - tsi108_clear_pci_cfg_error(); - regs_set_recoverable(regs); - regs_set_return_ip(regs, extable_fixup(entry)); - return 1; - } - return 0; -} - -define_machine(mpc7448_hpc2){ - .name = "MPC7448 HPC2", - .probe = mpc7448_hpc2_probe, - .setup_arch = mpc7448_hpc2_setup_arch, - .discover_phbs = mpc7448_hpc2_setup_pci, - .init_IRQ = mpc7448_hpc2_init_IRQ, - .show_cpuinfo = mpc7448_hpc2_show_cpuinfo, - .get_irq = mpic_get_irq, - .restart = mpc7448_hpc2_restart, - .calibrate_decr = generic_calibrate_decr, - .machine_check_exception= mpc7448_machine_check_exception, - .progress = udbg_progress, -}; diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c index 4854cc592cec..5ca41972ef22 100644 --- a/arch/powerpc/platforms/embedded6xx/mvme5100.c +++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c @@ -14,6 +14,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/seq_file.h> #include <asm/i8259.h> #include <asm/pci-bridge.h> @@ -186,14 +187,6 @@ static void __noreturn mvme5100_restart(char *cmd) ; } -/* - * Called very early, device-tree isn't unflattened - */ -static int __init mvme5100_probe(void) -{ - return of_machine_is_compatible("MVME5100"); -} - static int __init probe_of_platform_devices(void) { @@ -205,13 +198,12 
@@ machine_device_initcall(mvme5100, probe_of_platform_devices); define_machine(mvme5100) { .name = "MVME5100", - .probe = mvme5100_probe, + .compatible = "MVME5100", .setup_arch = mvme5100_setup_arch, .discover_phbs = mvme5100_setup_pci, .init_IRQ = mvme5100_pic_init, .show_cpuinfo = mvme5100_show_cpuinfo, .get_irq = mpic_get_irq, .restart = mvme5100_restart, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c index 5f16e80b6ed6..e49880e8dab8 100644 --- a/arch/powerpc/platforms/embedded6xx/storcenter.c +++ b/arch/powerpc/platforms/embedded6xx/storcenter.c @@ -110,18 +110,12 @@ static void __noreturn storcenter_restart(char *cmd) for (;;) ; } -static int __init storcenter_probe(void) -{ - return of_machine_is_compatible("iomega,storcenter"); -} - define_machine(storcenter){ .name = "IOMEGA StorCenter", - .probe = storcenter_probe, + .compatible = "iomega,storcenter", .setup_arch = storcenter_setup_arch, .discover_phbs = storcenter_setup_pci, .init_IRQ = storcenter_init_IRQ, .get_irq = mpic_get_irq, .restart = storcenter_restart, - .calibrate_decr = generic_calibrate_decr, }; diff --git a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c index e02bdabf358c..221577f32b01 100644 --- a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c +++ b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c @@ -193,24 +193,6 @@ static int ug_udbg_getc_poll(void) } /* - * Retrieves and prepares the virtual address needed to access the hardware. - */ -static void __iomem *__init ug_udbg_setup_exi_io_base(struct device_node *np) -{ - void __iomem *exi_io_base = NULL; - phys_addr_t paddr; - const unsigned int *reg; - - reg = of_get_property(np, "reg", NULL); - if (reg) { - paddr = of_translate_address(np, reg); - if (paddr) - exi_io_base = ioremap(paddr, reg[1]); - } - return exi_io_base; -} - -/* * Checks if a USB Gecko adapter is inserted in any memory card slot. 
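
Most board files in this section (gamecube, holly, linkstation, mvme5100, storcenter, and wii just below) drop probe() functions that did nothing but call of_machine_is_compatible(), relying on the .compatible field of the machine description instead; a probe() hook is only kept where there is real early setup to do, such as installing pm_power_off. A minimal sketch with a hypothetical board:

#include <asm/machdep.h>
#include <asm/udbg.h>

/* Hypothetical minimal machine description: no probe() needed, the
 * core matches the root node against .compatible instead.
 */
define_machine(example_board) {
	.name		= "Example Board",
	.compatible	= "vendor,example-board",
	.progress	= udbg_progress,
};
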
*/ static void __iomem *__init ug_udbg_probe(void __iomem *exi_io_base) @@ -246,7 +228,7 @@ void __init ug_udbg_init(void) goto out; } - exi_io_base = ug_udbg_setup_exi_io_base(np); + exi_io_base = of_iomap(np, 0); if (!exi_io_base) { udbg_printf("%s: failed to setup EXI io base\n", __func__); goto done; diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 9e03ff8f631c..cb3be6d6e339 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -15,8 +15,6 @@ #include <linux/seq_file.h> #include <linux/of_address.h> #include <linux/of_platform.h> -#include <linux/memblock.h> -#include <mm/mmu_decl.h> #include <asm/io.h> #include <asm/machdep.h> @@ -49,19 +47,6 @@ static void __iomem *hw_ctrl; static void __iomem *hw_gpio; -static int __init page_aligned(unsigned long x) -{ - return !(x & (PAGE_SIZE-1)); -} - -void __init wii_memory_fixups(void) -{ - struct memblock_region *p = memblock.memory.regions; - - BUG_ON(memblock.memory.cnt != 2); - BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base)); -} - static void __noreturn wii_spin(void) { local_irq_disable(); @@ -89,8 +74,8 @@ static void __iomem *__init wii_ioremap_hw_regs(char *name, char *compatible) hw_regs = ioremap(res.start, resource_size(&res)); if (hw_regs) { - pr_info("%s at 0x%08x mapped to 0x%p\n", name, - res.start, hw_regs); + pr_info("%s at 0x%pa mapped to 0x%p\n", name, + &res.start, hw_regs); } out_put: @@ -156,9 +141,6 @@ static void __init wii_pic_probe(void) static int __init wii_probe(void) { - if (!of_machine_is_compatible("nintendo,wii")) - return 0; - pm_power_off = wii_power_off; ug_udbg_init(); @@ -179,23 +161,20 @@ static const struct of_device_id wii_of_bus[] = { static int __init wii_device_probe(void) { - if (!machine_is(wii)) - return 0; - of_platform_populate(NULL, wii_of_bus, NULL, NULL); return 0; } -device_initcall(wii_device_probe); +machine_device_initcall(wii, wii_device_probe); define_machine(wii) { .name = "wii", + .compatible = "nintendo,wii", .probe = wii_probe, .setup_arch = wii_setup_arch, .restart = wii_restart, .halt = wii_halt, .init_IRQ = wii_pic_probe, .get_irq = flipper_pic_get_irq, - .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, .machine_shutdown = wii_shutdown, }; diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c index 84afae7a2561..b8d37a9932f1 100644 --- a/arch/powerpc/platforms/fsl_uli1575.c +++ b/arch/powerpc/platforms/fsl_uli1575.c @@ -13,6 +13,9 @@ #include <linux/of_irq.h> #include <asm/pci-bridge.h> +#include <asm/ppc-pci.h> + +#include <sysdev/fsl_pci.h> #define ULI_PIRQA 0x08 #define ULI_PIRQB 0x09 @@ -36,7 +39,7 @@ #define ULI_8259_IRQ14 0x0d #define ULI_8259_IRQ15 0x0f -u8 uli_pirq_to_irq[8] = { +static u8 uli_pirq_to_irq[8] = { ULI_8259_IRQ9, /* PIRQA */ ULI_8259_IRQ10, /* PIRQB */ ULI_8259_IRQ11, /* PIRQC */ @@ -341,10 +344,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5288, hpcd_quirk_uli5288); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, hpcd_quirk_uli5229); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 0x5288, hpcd_final_uli5288); -int uli_exclude_device(struct pci_controller *hose, - u_char bus, u_char devfn) +static int uli_exclude_device(struct pci_controller *hose, u_char bus, u_char devfn) { - if (bus == (hose->first_busno + 2)) { + if (hose->dn == fsl_pci_primary && bus == (hose->first_busno + 2)) { /* exclude Modem controller */ if ((PCI_SLOT(devfn) == 29) && (PCI_FUNC(devfn) == 1)) return 
PCIBIOS_DEVICE_NOT_FOUND; @@ -356,3 +358,22 @@ int uli_exclude_device(struct pci_controller *hose, return PCIBIOS_SUCCESSFUL; } + +void __init uli_init(void) +{ + struct device_node *node; + struct device_node *pci_with_uli; + + /* See if we have a ULI under the primary */ + + node = of_find_node_by_name(NULL, "uli1575"); + while ((pci_with_uli = of_get_parent(node))) { + of_node_put(node); + node = pci_with_uli; + + if (pci_with_uli == fsl_pci_primary) { + ppc_md.pci_exclude_device = uli_exclude_device; + break; + } + } +} diff --git a/arch/powerpc/platforms/maple/Kconfig b/arch/powerpc/platforms/maple/Kconfig deleted file mode 100644 index 4c058cc57c90..000000000000 --- a/arch/powerpc/platforms/maple/Kconfig +++ /dev/null @@ -1,19 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -config PPC_MAPLE - depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN - bool "Maple 970FX Evaluation Board" - select FORCE_PCI - select MPIC - select U3_DART - select MPIC_U3_HT_IRQS - select GENERIC_TBSYNC - select PPC_UDBG_16550 - select PPC_970_NAP - select PPC_64S_HASH_MMU - select PPC_HASH_MMU_NATIVE - select PPC_RTAS - select MMIO_NVRAM - select ATA_NONSTANDARD if ATA - help - This option enables support for the Maple 970FX Evaluation Board. - For more information, refer to <http://www.970eval.com> diff --git a/arch/powerpc/platforms/maple/Makefile b/arch/powerpc/platforms/maple/Makefile deleted file mode 100644 index 19f35ab828a7..000000000000 --- a/arch/powerpc/platforms/maple/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-y += setup.o pci.o time.o diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h deleted file mode 100644 index 4f358b55c341..000000000000 --- a/arch/powerpc/platforms/maple/maple.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Declarations for maple-specific code. - * - * Maple is the name of a PPC970 evaluation board. - */ -extern int maple_set_rtc_time(struct rtc_time *tm); -extern void maple_get_rtc_time(struct rtc_time *tm); -extern time64_t maple_get_boot_time(void); -extern void maple_calibrate_decr(void); -extern void maple_pci_init(void); -extern void maple_pci_irq_fixup(struct pci_dev *dev); -extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel); - -extern struct pci_controller_ops maple_pci_controller_ops; diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c deleted file mode 100644 index b911b31717cc..000000000000 --- a/arch/powerpc/platforms/maple/pci.c +++ /dev/null @@ -1,672 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), - * IBM Corp. - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/irq.h> -#include <linux/of_irq.h> - -#include <asm/sections.h> -#include <asm/io.h> -#include <asm/pci-bridge.h> -#include <asm/machdep.h> -#include <asm/iommu.h> -#include <asm/ppc-pci.h> -#include <asm/isa-bridge.h> - -#include "maple.h" - -#ifdef DEBUG -#define DBG(x...) printk(x) -#else -#define DBG(x...) 
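
The new uli_init() above walks upward from the uli1575 node, dropping each device-tree reference as it goes, and installs the ppc_md.pci_exclude_device hook only if the walk reaches fsl_pci_primary. The reference-counting idiom for such a walk, reduced to a hedged sketch with hypothetical names:

#include <linux/of.h>

/* Hypothetical check: does @np sit somewhere below @ancestor?
 * of_get_parent() takes a new reference on every step, so the
 * previous node is released with of_node_put() as we climb.
 */
static bool example_has_ancestor(struct device_node *np,
				 struct device_node *ancestor)
{
	struct device_node *parent;
	bool found = false;

	np = of_node_get(np);
	while ((parent = of_get_parent(np))) {
		of_node_put(np);
		np = parent;
		if (np == ancestor) {
			found = true;
			break;
		}
	}
	of_node_put(np);

	return found;
}
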
-#endif - -static struct pci_controller *u3_agp, *u3_ht, *u4_pcie; - -static int __init fixup_one_level_bus_range(struct device_node *node, int higher) -{ - for (; node; node = node->sibling) { - const int *bus_range; - const unsigned int *class_code; - int len; - - /* For PCI<->PCI bridges or CardBus bridges, we go down */ - class_code = of_get_property(node, "class-code", NULL); - if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && - (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) - continue; - bus_range = of_get_property(node, "bus-range", &len); - if (bus_range != NULL && len > 2 * sizeof(int)) { - if (bus_range[1] > higher) - higher = bus_range[1]; - } - higher = fixup_one_level_bus_range(node->child, higher); - } - return higher; -} - -/* This routine fixes the "bus-range" property of all bridges in the - * system since they tend to have their "last" member wrong on macs - * - * Note that the bus numbers manipulated here are OF bus numbers, they - * are not Linux bus numbers. - */ -static void __init fixup_bus_range(struct device_node *bridge) -{ - int *bus_range; - struct property *prop; - int len; - - /* Lookup the "bus-range" property for the hose */ - prop = of_find_property(bridge, "bus-range", &len); - if (prop == NULL || prop->value == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %pOF\n", - bridge); - return; - } - bus_range = prop->value; - bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]); -} - - -static unsigned long u3_agp_cfa0(u8 devfn, u8 off) -{ - return (1 << (unsigned long)PCI_SLOT(devfn)) | - ((unsigned long)PCI_FUNC(devfn) << 8) | - ((unsigned long)off & 0xFCUL); -} - -static unsigned long u3_agp_cfa1(u8 bus, u8 devfn, u8 off) -{ - return ((unsigned long)bus << 16) | - ((unsigned long)devfn << 8) | - ((unsigned long)off & 0xFCUL) | - 1UL; -} - -static volatile void __iomem *u3_agp_cfg_access(struct pci_controller* hose, - u8 bus, u8 dev_fn, u8 offset) -{ - unsigned int caddr; - - if (bus == hose->first_busno) { - if (dev_fn < (11 << 3)) - return NULL; - caddr = u3_agp_cfa0(dev_fn, offset); - } else - caddr = u3_agp_cfa1(bus, dev_fn, offset); - - /* Uninorth will return garbage if we don't read back the value ! */ - do { - out_le32(hose->cfg_addr, caddr); - } while (in_le32(hose->cfg_addr) != caddr); - - offset &= 0x07; - return hose->cfg_data + offset; -} - -static int u3_agp_read_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 *val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - - addr = u3_agp_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_le16(addr); - break; - default: - *val = in_le32(addr); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static int u3_agp_write_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - - addr = u3_agp_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. 
- */ - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_le16(addr, val); - break; - default: - out_le32(addr, val); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static struct pci_ops u3_agp_pci_ops = -{ - .read = u3_agp_read_config, - .write = u3_agp_write_config, -}; - -static unsigned long u3_ht_cfa0(u8 devfn, u8 off) -{ - return (devfn << 8) | off; -} - -static unsigned long u3_ht_cfa1(u8 bus, u8 devfn, u8 off) -{ - return u3_ht_cfa0(devfn, off) + (bus << 16) + 0x01000000UL; -} - -static volatile void __iomem *u3_ht_cfg_access(struct pci_controller* hose, - u8 bus, u8 devfn, u8 offset) -{ - if (bus == hose->first_busno) { - if (PCI_SLOT(devfn) == 0) - return NULL; - return hose->cfg_data + u3_ht_cfa0(devfn, offset); - } else - return hose->cfg_data + u3_ht_cfa1(bus, devfn, offset); -} - -static int u3_ht_root_read_config(struct pci_controller *hose, u8 offset, - int len, u32 *val) -{ - volatile void __iomem *addr; - - addr = hose->cfg_addr; - addr += ((offset & ~3) << 2) + (4 - len - (offset & 3)); - - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_be16(addr); - break; - default: - *val = in_be32(addr); - break; - } - - return PCIBIOS_SUCCESSFUL; -} - -static int u3_ht_root_write_config(struct pci_controller *hose, u8 offset, - int len, u32 val) -{ - volatile void __iomem *addr; - - addr = hose->cfg_addr + ((offset & ~3) << 2) + (4 - len - (offset & 3)); - - if (offset >= PCI_BASE_ADDRESS_0 && offset < PCI_CAPABILITY_LIST) - return PCIBIOS_SUCCESSFUL; - - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_be16(addr, val); - break; - default: - out_be32(addr, val); - break; - } - - return PCIBIOS_SUCCESSFUL; -} - -static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 *val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0)) - return u3_ht_root_read_config(hose, offset, len, val); - - if (offset > 0xff) - return PCIBIOS_BAD_REGISTER_NUMBER; - - addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_le16(addr); - break; - default: - *val = in_le32(addr); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0)) - return u3_ht_root_write_config(hose, offset, len, val); - - if (offset > 0xff) - return PCIBIOS_BAD_REGISTER_NUMBER; - - addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. 
- */ - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_le16(addr, val); - break; - default: - out_le32(addr, val); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static struct pci_ops u3_ht_pci_ops = -{ - .read = u3_ht_read_config, - .write = u3_ht_write_config, -}; - -static unsigned int u4_pcie_cfa0(unsigned int devfn, unsigned int off) -{ - return (1 << PCI_SLOT(devfn)) | - (PCI_FUNC(devfn) << 8) | - ((off >> 8) << 28) | - (off & 0xfcu); -} - -static unsigned int u4_pcie_cfa1(unsigned int bus, unsigned int devfn, - unsigned int off) -{ - return (bus << 16) | - (devfn << 8) | - ((off >> 8) << 28) | - (off & 0xfcu) | 1u; -} - -static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose, - u8 bus, u8 dev_fn, int offset) -{ - unsigned int caddr; - - if (bus == hose->first_busno) - caddr = u4_pcie_cfa0(dev_fn, offset); - else - caddr = u4_pcie_cfa1(bus, dev_fn, offset); - - /* Uninorth will return garbage if we don't read back the value ! */ - do { - out_le32(hose->cfg_addr, caddr); - } while (in_le32(hose->cfg_addr) != caddr); - - offset &= 0x03; - return hose->cfg_data + offset; -} - -static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 *val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - if (offset >= 0x1000) - return PCIBIOS_BAD_REGISTER_NUMBER; - addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_le16(addr); - break; - default: - *val = in_le32(addr); - break; - } - return PCIBIOS_SUCCESSFUL; -} -static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - if (offset >= 0x1000) - return PCIBIOS_BAD_REGISTER_NUMBER; - addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_le16(addr, val); - break; - default: - out_le32(addr, val); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static struct pci_ops u4_pcie_pci_ops = -{ - .read = u4_pcie_read_config, - .write = u4_pcie_write_config, -}; - -static void __init setup_u3_agp(struct pci_controller* hose) -{ - /* On G5, we move AGP up to high bus number so we don't need - * to reassign bus numbers for HT. If we ever have P2P bridges - * on AGP, we'll have to move pci_assign_all_buses to the - * pci_controller structure so we enable it for AGP and not for - * HT childs. 
- * We hard code the address because of the different size of - * the reg address cell, we shall fix that by killing struct - * reg_property and using some accessor functions instead - */ - hose->first_busno = 0xf0; - hose->last_busno = 0xff; - hose->ops = &u3_agp_pci_ops; - hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); - hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); - - u3_agp = hose; -} - -static void __init setup_u4_pcie(struct pci_controller* hose) -{ - /* We currently only implement the "non-atomic" config space, to - * be optimised later. - */ - hose->ops = &u4_pcie_pci_ops; - hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); - hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); - - u4_pcie = hose; -} - -static void __init setup_u3_ht(struct pci_controller* hose) -{ - hose->ops = &u3_ht_pci_ops; - - /* We hard code the address because of the different size of - * the reg address cell, we shall fix that by killing struct - * reg_property and using some accessor functions instead - */ - hose->cfg_data = ioremap(0xf2000000, 0x02000000); - hose->cfg_addr = ioremap(0xf8070000, 0x1000); - - hose->first_busno = 0; - hose->last_busno = 0xef; - - u3_ht = hose; -} - -static int __init maple_add_bridge(struct device_node *dev) -{ - int len; - struct pci_controller *hose; - char* disp_name; - const int *bus_range; - int primary = 1; - - DBG("Adding PCI host bridge %pOF\n", dev); - - bus_range = of_get_property(dev, "bus-range", &len); - if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %pOF, assume bus 0\n", - dev); - } - - hose = pcibios_alloc_controller(dev); - if (hose == NULL) - return -ENOMEM; - hose->first_busno = bus_range ? bus_range[0] : 0; - hose->last_busno = bus_range ? bus_range[1] : 0xff; - hose->controller_ops = maple_pci_controller_ops; - - disp_name = NULL; - if (of_device_is_compatible(dev, "u3-agp")) { - setup_u3_agp(hose); - disp_name = "U3-AGP"; - primary = 0; - } else if (of_device_is_compatible(dev, "u3-ht")) { - setup_u3_ht(hose); - disp_name = "U3-HT"; - primary = 1; - } else if (of_device_is_compatible(dev, "u4-pcie")) { - setup_u4_pcie(hose); - disp_name = "U4-PCIE"; - primary = 0; - } - printk(KERN_INFO "Found %s PCI host bridge. 
Firmware bus number: %d->%d\n", - disp_name, hose->first_busno, hose->last_busno); - - /* Interpret the "ranges" property */ - /* This also maps the I/O region and sets isa_io/mem_base */ - pci_process_bridge_OF_ranges(hose, dev, primary); - - /* Fixup "bus-range" OF property */ - fixup_bus_range(dev); - - /* Check for legacy IOs */ - isa_bridge_find_early(hose); - - /* create pci_dn's for DT nodes under this PHB */ - pci_devs_phb_init_dynamic(hose); - - return 0; -} - - -void maple_pci_irq_fixup(struct pci_dev *dev) -{ - DBG(" -> maple_pci_irq_fixup\n"); - - /* Fixup IRQ for PCIe host */ - if (u4_pcie != NULL && dev->bus->number == 0 && - pci_bus_to_host(dev->bus) == u4_pcie) { - printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n"); - dev->irq = irq_create_mapping(NULL, 1); - if (dev->irq) - irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); - } - - /* Hide AMD8111 IDE interrupt when in legacy mode so - * the driver calls pci_get_legacy_ide_irq() - */ - if (dev->vendor == PCI_VENDOR_ID_AMD && - dev->device == PCI_DEVICE_ID_AMD_8111_IDE && - (dev->class & 5) != 5) { - dev->irq = 0; - } - - DBG(" <- maple_pci_irq_fixup\n"); -} - -static int maple_pci_root_bridge_prepare(struct pci_host_bridge *bridge) -{ - struct pci_controller *hose = pci_bus_to_host(bridge->bus); - struct device_node *np, *child; - - if (hose != u3_agp) - return 0; - - /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We - * assume there is no P2P bridge on the AGP bus, which should be a - * safe assumptions hopefully. - */ - np = hose->dn; - PCI_DN(np)->busno = 0xf0; - for_each_child_of_node(np, child) - PCI_DN(child)->busno = 0xf0; - - return 0; -} - -void __init maple_pci_init(void) -{ - struct device_node *np, *root; - struct device_node *ht = NULL; - - /* Probe root PCI hosts, that is on U3 the AGP host and the - * HyperTransport host. That one is actually "kept" around - * and actually added last as it's resource management relies - * on the AGP resources to have been setup first - */ - root = of_find_node_by_path("/"); - if (root == NULL) { - printk(KERN_CRIT "maple_find_bridges: can't find root of device tree\n"); - return; - } - for_each_child_of_node(root, np) { - if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "ht")) - continue; - if ((of_device_is_compatible(np, "u4-pcie") || - of_device_is_compatible(np, "u3-agp")) && - maple_add_bridge(np) == 0) - of_node_get(np); - - if (of_device_is_compatible(np, "u3-ht")) { - of_node_get(np); - ht = np; - } - } - of_node_put(root); - - /* Now setup the HyperTransport host if we found any - */ - if (ht && maple_add_bridge(ht) != 0) - of_node_put(ht); - - ppc_md.pcibios_root_bridge_prepare = maple_pci_root_bridge_prepare; - - /* Tell pci.c to not change any resource allocations. */ - pci_add_flags(PCI_PROBE_ONLY); -} - -int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) -{ - struct device_node *np; - unsigned int defirq = channel ? 15 : 14; - unsigned int irq; - - if (pdev->vendor != PCI_VENDOR_ID_AMD || - pdev->device != PCI_DEVICE_ID_AMD_8111_IDE) - return defirq; - - np = pci_device_to_OF_node(pdev); - if (np == NULL) { - printk("Failed to locate OF node for IDE %s\n", - pci_name(pdev)); - return defirq; - } - irq = irq_of_parse_and_map(np, channel & 0x1); - if (!irq) { - printk("Failed to map onboard IDE interrupt for channel %d\n", - channel); - return defirq; - } - return irq; -} - -static void quirk_ipr_msi(struct pci_dev *dev) -{ - /* Something prevents MSIs from the IPR from working on Bimini, - * and the driver has no smarts to recover. 
So disable MSI - * on it for now. */ - - if (machine_is(maple)) { - dev->no_msi = 1; - dev_info(&dev->dev, "Quirk disabled MSI\n"); - } -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, - quirk_ipr_msi); - -struct pci_controller_ops maple_pci_controller_ops = { -}; diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c deleted file mode 100644 index c26c379e1cc8..000000000000 --- a/arch/powerpc/platforms/maple/setup.c +++ /dev/null @@ -1,363 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Maple (970 eval board) setup code - * - * (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org), - * IBM Corp. - */ - -#undef DEBUG - -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include <linux/unistd.h> -#include <linux/ptrace.h> -#include <linux/user.h> -#include <linux/tty.h> -#include <linux/string.h> -#include <linux/delay.h> -#include <linux/ioport.h> -#include <linux/major.h> -#include <linux/initrd.h> -#include <linux/vt_kern.h> -#include <linux/console.h> -#include <linux/pci.h> -#include <linux/adb.h> -#include <linux/cuda.h> -#include <linux/pmu.h> -#include <linux/irq.h> -#include <linux/seq_file.h> -#include <linux/root_dev.h> -#include <linux/serial.h> -#include <linux/smp.h> -#include <linux/bitops.h> -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/memblock.h> - -#include <asm/processor.h> -#include <asm/sections.h> -#include <asm/io.h> -#include <asm/pci-bridge.h> -#include <asm/iommu.h> -#include <asm/machdep.h> -#include <asm/dma.h> -#include <asm/cputable.h> -#include <asm/time.h> -#include <asm/mpic.h> -#include <asm/rtas.h> -#include <asm/udbg.h> -#include <asm/nvram.h> - -#include "maple.h" - -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) 
-#endif - -static unsigned long maple_find_nvram_base(void) -{ - struct device_node *rtcs; - unsigned long result = 0; - - /* find NVRAM device */ - rtcs = of_find_compatible_node(NULL, "nvram", "AMD8111"); - if (rtcs) { - struct resource r; - if (of_address_to_resource(rtcs, 0, &r)) { - printk(KERN_EMERG "Maple: Unable to translate NVRAM" - " address\n"); - goto bail; - } - if (!(r.flags & IORESOURCE_IO)) { - printk(KERN_EMERG "Maple: NVRAM address isn't PIO!\n"); - goto bail; - } - result = r.start; - } else - printk(KERN_EMERG "Maple: Unable to find NVRAM\n"); - bail: - of_node_put(rtcs); - return result; -} - -static void __noreturn maple_restart(char *cmd) -{ - unsigned int maple_nvram_base; - const unsigned int *maple_nvram_offset, *maple_nvram_command; - struct device_node *sp; - - maple_nvram_base = maple_find_nvram_base(); - if (maple_nvram_base == 0) - goto fail; - - /* find service processor device */ - sp = of_find_node_by_name(NULL, "service-processor"); - if (!sp) { - printk(KERN_EMERG "Maple: Unable to find Service Processor\n"); - goto fail; - } - maple_nvram_offset = of_get_property(sp, "restart-addr", NULL); - maple_nvram_command = of_get_property(sp, "restart-value", NULL); - of_node_put(sp); - - /* send command */ - outb_p(*maple_nvram_command, maple_nvram_base + *maple_nvram_offset); - for (;;) ; - fail: - printk(KERN_EMERG "Maple: Manual Restart Required\n"); - for (;;) ; -} - -static void __noreturn maple_power_off(void) -{ - unsigned int maple_nvram_base; - const unsigned int *maple_nvram_offset, *maple_nvram_command; - struct device_node *sp; - - maple_nvram_base = maple_find_nvram_base(); - if (maple_nvram_base == 0) - goto fail; - - /* find service processor device */ - sp = of_find_node_by_name(NULL, "service-processor"); - if (!sp) { - printk(KERN_EMERG "Maple: Unable to find Service Processor\n"); - goto fail; - } - maple_nvram_offset = of_get_property(sp, "power-off-addr", NULL); - maple_nvram_command = of_get_property(sp, "power-off-value", NULL); - of_node_put(sp); - - /* send command */ - outb_p(*maple_nvram_command, maple_nvram_base + *maple_nvram_offset); - for (;;) ; - fail: - printk(KERN_EMERG "Maple: Manual Power-Down Required\n"); - for (;;) ; -} - -static void __noreturn maple_halt(void) -{ - maple_power_off(); -} - -#ifdef CONFIG_SMP -static struct smp_ops_t maple_smp_ops = { - .probe = smp_mpic_probe, - .message_pass = smp_mpic_message_pass, - .kick_cpu = smp_generic_kick_cpu, - .setup_cpu = smp_mpic_setup_cpu, - .give_timebase = smp_generic_give_timebase, - .take_timebase = smp_generic_take_timebase, -}; -#endif /* CONFIG_SMP */ - -static void __init maple_use_rtas_reboot_and_halt_if_present(void) -{ - if (rtas_service_present("system-reboot") && - rtas_service_present("power-off")) { - ppc_md.restart = rtas_restart; - pm_power_off = rtas_power_off; - ppc_md.halt = rtas_halt; - } -} - -static void __init maple_setup_arch(void) -{ - /* init to some ~sane value until calibrate_delay() runs */ - loops_per_jiffy = 50000000; - - /* Setup SMP callback */ -#ifdef CONFIG_SMP - smp_ops = &maple_smp_ops; -#endif - maple_use_rtas_reboot_and_halt_if_present(); - - printk(KERN_DEBUG "Using native/NAP idle loop\n"); - - mmio_nvram_init(); -} - -/* - * This is almost identical to pSeries and CHRP. 
We need to make that - * code generic at one point, with appropriate bits in the device-tree to - * identify the presence of an HT APIC - */ -static void __init maple_init_IRQ(void) -{ - struct device_node *root, *np, *mpic_node = NULL; - const unsigned int *opprop; - unsigned long openpic_addr = 0; - int naddr, n, i, opplen, has_isus = 0; - struct mpic *mpic; - unsigned int flags = 0; - - /* Locate MPIC in the device-tree. Note that there is a bug - * in Maple device-tree where the type of the controller is - * open-pic and not interrupt-controller - */ - - for_each_node_by_type(np, "interrupt-controller") - if (of_device_is_compatible(np, "open-pic")) { - mpic_node = np; - break; - } - if (mpic_node == NULL) - for_each_node_by_type(np, "open-pic") { - mpic_node = np; - break; - } - if (mpic_node == NULL) { - printk(KERN_ERR - "Failed to locate the MPIC interrupt controller\n"); - return; - } - - /* Find address list in /platform-open-pic */ - root = of_find_node_by_path("/"); - naddr = of_n_addr_cells(root); - opprop = of_get_property(root, "platform-open-pic", &opplen); - if (opprop) { - openpic_addr = of_read_number(opprop, naddr); - has_isus = (opplen > naddr); - printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n", - openpic_addr, has_isus); - } - - BUG_ON(openpic_addr == 0); - - /* Check for a big endian MPIC */ - if (of_get_property(np, "big-endian", NULL) != NULL) - flags |= MPIC_BIG_ENDIAN; - - /* XXX Maple specific bits */ - flags |= MPIC_U3_HT_IRQS; - /* All U3/U4 are big-endian, older SLOF firmware doesn't encode this */ - flags |= MPIC_BIG_ENDIAN; - - /* Setup the openpic driver. More device-tree junks, we hard code no - * ISUs for now. I'll have to revisit some stuffs with the folks doing - * the firmware for those - */ - mpic = mpic_alloc(mpic_node, openpic_addr, flags, - /*has_isus ? 16 :*/ 0, 0, " MPIC "); - BUG_ON(mpic == NULL); - - /* Add ISUs */ - opplen /= sizeof(u32); - for (n = 0, i = naddr; i < opplen; i += naddr, n++) { - unsigned long isuaddr = of_read_number(opprop + i, naddr); - mpic_assign_isu(mpic, n, isuaddr); - } - - /* All ISUs are setup, complete initialization */ - mpic_init(mpic); - ppc_md.get_irq = mpic_get_irq; - of_node_put(mpic_node); - of_node_put(root); -} - -static void __init maple_progress(char *s, unsigned short hex) -{ - printk("*** %04x : %s\n", hex, s ? s : ""); -} - - -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init maple_probe(void) -{ - if (!of_machine_is_compatible("Momentum,Maple") && - !of_machine_is_compatible("Momentum,Apache")) - return 0; - - pm_power_off = maple_power_off; - - iommu_init_early_dart(&maple_pci_controller_ops); - - return 1; -} - -#ifdef CONFIG_EDAC -/* - * Register a platform device for CPC925 memory controller on - * all boards with U3H (CPC925) bridge. 
- */ -static int __init maple_cpc925_edac_setup(void) -{ - struct platform_device *pdev; - struct device_node *np = NULL; - struct resource r; - int ret; - volatile void __iomem *mem; - u32 rev; - - np = of_find_node_by_type(NULL, "memory-controller"); - if (!np) { - printk(KERN_ERR "%s: Unable to find memory-controller node\n", - __func__); - return -ENODEV; - } - - ret = of_address_to_resource(np, 0, &r); - of_node_put(np); - - if (ret < 0) { - printk(KERN_ERR "%s: Unable to get memory-controller reg\n", - __func__); - return -ENODEV; - } - - mem = ioremap(r.start, resource_size(&r)); - if (!mem) { - printk(KERN_ERR "%s: Unable to map memory-controller memory\n", - __func__); - return -ENOMEM; - } - - rev = __raw_readl(mem); - iounmap(mem); - - if (rev < 0x34 || rev > 0x3f) { /* U3H */ - printk(KERN_ERR "%s: Non-CPC925(U3H) bridge revision: %02x\n", - __func__, rev); - return 0; - } - - pdev = platform_device_register_simple("cpc925_edac", 0, &r, 1); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - - printk(KERN_INFO "%s: CPC925 platform device created\n", __func__); - - return 0; -} -machine_device_initcall(maple, maple_cpc925_edac_setup); -#endif - -define_machine(maple) { - .name = "Maple", - .probe = maple_probe, - .setup_arch = maple_setup_arch, - .discover_phbs = maple_pci_init, - .init_IRQ = maple_init_IRQ, - .pci_irq_fixup = maple_pci_irq_fixup, - .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, - .restart = maple_restart, - .halt = maple_halt, - .get_boot_time = maple_get_boot_time, - .set_rtc_time = maple_set_rtc_time, - .get_rtc_time = maple_get_rtc_time, - .calibrate_decr = generic_calibrate_decr, - .progress = maple_progress, - .power_save = power4_idle, -}; diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c deleted file mode 100644 index 823e219ef8ee..000000000000 --- a/arch/powerpc/platforms/maple/time.c +++ /dev/null @@ -1,169 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org), - * IBM Corp. - */ - -#undef DEBUG - -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/init.h> -#include <linux/time.h> -#include <linux/adb.h> -#include <linux/pmu.h> -#include <linux/interrupt.h> -#include <linux/mc146818rtc.h> -#include <linux/bcd.h> -#include <linux/of_address.h> - -#include <asm/sections.h> -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/time.h> - -#include "maple.h" - -#ifdef DEBUG -#define DBG(x...) printk(x) -#else -#define DBG(x...) 
-#endif - -static int maple_rtc_addr; - -static int maple_clock_read(int addr) -{ - outb_p(addr, maple_rtc_addr); - return inb_p(maple_rtc_addr+1); -} - -static void maple_clock_write(unsigned long val, int addr) -{ - outb_p(addr, maple_rtc_addr); - outb_p(val, maple_rtc_addr+1); -} - -void maple_get_rtc_time(struct rtc_time *tm) -{ - do { - tm->tm_sec = maple_clock_read(RTC_SECONDS); - tm->tm_min = maple_clock_read(RTC_MINUTES); - tm->tm_hour = maple_clock_read(RTC_HOURS); - tm->tm_mday = maple_clock_read(RTC_DAY_OF_MONTH); - tm->tm_mon = maple_clock_read(RTC_MONTH); - tm->tm_year = maple_clock_read(RTC_YEAR); - } while (tm->tm_sec != maple_clock_read(RTC_SECONDS)); - - if (!(maple_clock_read(RTC_CONTROL) & RTC_DM_BINARY) - || RTC_ALWAYS_BCD) { - tm->tm_sec = bcd2bin(tm->tm_sec); - tm->tm_min = bcd2bin(tm->tm_min); - tm->tm_hour = bcd2bin(tm->tm_hour); - tm->tm_mday = bcd2bin(tm->tm_mday); - tm->tm_mon = bcd2bin(tm->tm_mon); - tm->tm_year = bcd2bin(tm->tm_year); - } - if ((tm->tm_year + 1900) < 1970) - tm->tm_year += 100; - - tm->tm_wday = -1; -} - -int maple_set_rtc_time(struct rtc_time *tm) -{ - unsigned char save_control, save_freq_select; - int sec, min, hour, mon, mday, year; - - spin_lock(&rtc_lock); - - save_control = maple_clock_read(RTC_CONTROL); /* tell the clock it's being set */ - - maple_clock_write((save_control|RTC_SET), RTC_CONTROL); - - save_freq_select = maple_clock_read(RTC_FREQ_SELECT); /* stop and reset prescaler */ - - maple_clock_write((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - - sec = tm->tm_sec; - min = tm->tm_min; - hour = tm->tm_hour; - mon = tm->tm_mon; - mday = tm->tm_mday; - year = tm->tm_year; - - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - sec = bin2bcd(sec); - min = bin2bcd(min); - hour = bin2bcd(hour); - mon = bin2bcd(mon); - mday = bin2bcd(mday); - year = bin2bcd(year); - } - maple_clock_write(sec, RTC_SECONDS); - maple_clock_write(min, RTC_MINUTES); - maple_clock_write(hour, RTC_HOURS); - maple_clock_write(mon, RTC_MONTH); - maple_clock_write(mday, RTC_DAY_OF_MONTH); - maple_clock_write(year, RTC_YEAR); - - /* The following flags have to be released exactly in this order, - * otherwise the DS12887 (popular MC146818A clone with integrated - * battery and quartz) will not reset the oscillator and will not - * update precisely 500 ms later. You won't find this mentioned in - * the Dallas Semiconductor data sheets, but who believes data - * sheets anyway ... 
-- Markus Kuhn - */ - maple_clock_write(save_control, RTC_CONTROL); - maple_clock_write(save_freq_select, RTC_FREQ_SELECT); - - spin_unlock(&rtc_lock); - - return 0; -} - -static struct resource rtc_iores = { - .name = "rtc", - .flags = IORESOURCE_IO | IORESOURCE_BUSY, -}; - -time64_t __init maple_get_boot_time(void) -{ - struct rtc_time tm; - struct device_node *rtcs; - - rtcs = of_find_compatible_node(NULL, "rtc", "pnpPNP,b00"); - if (rtcs) { - struct resource r; - if (of_address_to_resource(rtcs, 0, &r)) { - printk(KERN_EMERG "Maple: Unable to translate RTC" - " address\n"); - goto bail; - } - if (!(r.flags & IORESOURCE_IO)) { - printk(KERN_EMERG "Maple: RTC address isn't PIO!\n"); - goto bail; - } - maple_rtc_addr = r.start; - printk(KERN_INFO "Maple: Found RTC at IO 0x%x\n", - maple_rtc_addr); - } - bail: - if (maple_rtc_addr == 0) { - maple_rtc_addr = RTC_PORT(0); /* legacy address */ - printk(KERN_INFO "Maple: No device node for RTC, assuming " - "legacy address (0x%x)\n", maple_rtc_addr); - } - - rtc_iores.start = maple_rtc_addr; - rtc_iores.end = maple_rtc_addr + 7; - request_resource(&ioport_resource, &rtc_iores); - - maple_get_rtc_time(&tm); - return rtc_tm_to_time64(&tm); -} - diff --git a/arch/powerpc/platforms/microwatt/Kconfig b/arch/powerpc/platforms/microwatt/Kconfig index 6af443a1db99..cb2aff635bb0 100644 --- a/arch/powerpc/platforms/microwatt/Kconfig +++ b/arch/powerpc/platforms/microwatt/Kconfig @@ -1,11 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 config PPC_MICROWATT - depends on PPC_BOOK3S_64 && !SMP + depends on PPC_BOOK3S_64 bool "Microwatt SoC platform" select PPC_XICS select PPC_ICS_NATIVE select PPC_ICP_NATIVE select PPC_UDBG_16550 + select COMMON_CLK help This option enables support for FPGA-based Microwatt implementations. 
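A minimal configuration sketch for an SMP-capable Microwatt build, using only the symbol from the Kconfig hunk above plus the generic SMP options (the CONFIG_NR_CPUS value is purely illustrative, not taken from this diff):
CONFIG_PPC_MICROWATT=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2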
diff --git a/arch/powerpc/platforms/microwatt/Makefile b/arch/powerpc/platforms/microwatt/Makefile index 116d6d3ad3f0..d973b2ab4042 100644 --- a/arch/powerpc/platforms/microwatt/Makefile +++ b/arch/powerpc/platforms/microwatt/Makefile @@ -1 +1,2 @@ obj-y += setup.o rng.o +obj-$(CONFIG_SMP) += smp.o diff --git a/arch/powerpc/platforms/microwatt/microwatt.h b/arch/powerpc/platforms/microwatt/microwatt.h index 335417e95e66..891aa2800768 100644 --- a/arch/powerpc/platforms/microwatt/microwatt.h +++ b/arch/powerpc/platforms/microwatt/microwatt.h @@ -3,5 +3,6 @@ #define _MICROWATT_H void microwatt_rng_init(void); +void microwatt_init_smp(void); #endif /* _MICROWATT_H */ diff --git a/arch/powerpc/platforms/microwatt/setup.c b/arch/powerpc/platforms/microwatt/setup.c index 6b32539395a4..6af2ccef736c 100644 --- a/arch/powerpc/platforms/microwatt/setup.c +++ b/arch/powerpc/platforms/microwatt/setup.c @@ -23,27 +23,39 @@ static void __init microwatt_init_IRQ(void) xics_init(); } -static int __init microwatt_probe(void) -{ - return of_machine_is_compatible("microwatt-soc"); -} - static int __init microwatt_populate(void) { return of_platform_default_populate(NULL, NULL, NULL); } machine_arch_initcall(microwatt, microwatt_populate); +static int __init microwatt_probe(void) +{ + /* Main reason for having this is to start the other CPU(s) */ + if (IS_ENABLED(CONFIG_SMP)) + microwatt_init_smp(); + return 1; +} + static void __init microwatt_setup_arch(void) { microwatt_rng_init(); } +static void microwatt_idle(void) +{ + if (!prep_irq_for_idle_irqsoff()) + return; + + __asm__ __volatile__ ("wait"); +} + define_machine(microwatt) { .name = "microwatt", + .compatible = "microwatt-soc", .probe = microwatt_probe, .init_IRQ = microwatt_init_IRQ, .setup_arch = microwatt_setup_arch, .progress = udbg_progress, - .calibrate_decr = generic_calibrate_decr, + .power_save = microwatt_idle, }; diff --git a/arch/powerpc/platforms/microwatt/smp.c b/arch/powerpc/platforms/microwatt/smp.c new file mode 100644 index 000000000000..7dbf2ca73d47 --- /dev/null +++ b/arch/powerpc/platforms/microwatt/smp.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* + * SMP support functions for Microwatt + * Copyright 2025 Paul Mackerras <paulus@ozlabs.org> + */ + +#include <linux/kernel.h> +#include <linux/smp.h> +#include <linux/io.h> +#include <asm/early_ioremap.h> +#include <asm/ppc-opcode.h> +#include <asm/reg.h> +#include <asm/smp.h> +#include <asm/xics.h> + +#include "microwatt.h" + +static void __init microwatt_smp_probe(void) +{ + xics_smp_probe(); +} + +static void microwatt_smp_setup_cpu(int cpu) +{ + if (cpu != 0) + xics_setup_cpu(); +} + +static struct smp_ops_t microwatt_smp_ops = { + .probe = microwatt_smp_probe, + .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */ + .kick_cpu = smp_generic_kick_cpu, + .setup_cpu = microwatt_smp_setup_cpu, +}; + +/* XXX get from device tree */ +#define SYSCON_BASE 0xc0000000 +#define SYSCON_LENGTH 0x100 + +#define SYSCON_CPU_CTRL 0x58 + +void __init microwatt_init_smp(void) +{ + volatile unsigned char __iomem *syscon; + int ncpus; + int timeout; + + syscon = early_ioremap(SYSCON_BASE, SYSCON_LENGTH); + if (syscon == NULL) { + pr_err("Failed to map SYSCON\n"); + return; + } + ncpus = (readl(syscon + SYSCON_CPU_CTRL) >> 8) & 0xff; + if (ncpus < 2) + goto out; + + smp_ops = &microwatt_smp_ops; + + /* + * Write two instructions at location 0: + * mfspr r3, PIR + * b __secondary_hold + */ + *(unsigned int *)KERNELBASE = PPC_RAW_MFSPR(3, SPRN_PIR); + *(unsigned int 
*)(KERNELBASE+4) = PPC_RAW_BRANCH(&__secondary_hold - (char *)(KERNELBASE+4)); + + /* enable the other CPUs, they start at location 0 */ + writel((1ul << ncpus) - 1, syscon + SYSCON_CPU_CTRL); + + timeout = 10000; + while (!__secondary_hold_acknowledge) { + if (--timeout == 0) + break; + barrier(); + } + + out: + early_iounmap((void *)syscon, SYSCON_LENGTH); +} diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index bf300167ad6b..e4538d471256 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c @@ -20,7 +20,7 @@ #include <linux/phy.h> #include <linux/of_address.h> #include <linux/of_mdio.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #define DELAY 1 @@ -260,7 +260,7 @@ out: } -static int gpio_mdio_remove(struct platform_device *dev) +static void gpio_mdio_remove(struct platform_device *dev) { struct mii_bus *bus = dev_get_drvdata(&dev->dev); @@ -271,8 +271,6 @@ static int gpio_mdio_remove(struct platform_device *dev) kfree(bus->priv); bus->priv = NULL; mdiobus_free(bus); - - return 0; } static const struct of_device_id gpio_mdio_match[] = @@ -294,7 +292,7 @@ static struct platform_driver gpio_mdio_driver = }, }; -static int gpio_mdio_init(void) +static int __init gpio_mdio_init(void) { struct device_node *np; @@ -314,7 +312,7 @@ static int gpio_mdio_init(void) } module_init(gpio_mdio_init); -static void gpio_mdio_exit(void) +static void __exit gpio_mdio_exit(void) { platform_driver_unregister(&gpio_mdio_driver); if (gpio_regs) diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 0a38663d44ed..375487cba874 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -254,7 +254,7 @@ void __init iommu_init_early_pasemi(void) iommu_off = 1; #else iommu_off = of_chosen && - of_get_property(of_chosen, "linux,iommu-off", NULL); + of_property_read_bool(of_chosen, "linux,iommu-off"); #endif if (iommu_off) return; diff --git a/arch/powerpc/platforms/pasemi/misc.c b/arch/powerpc/platforms/pasemi/misc.c index f859ada29074..9e9a7e46288a 100644 --- a/arch/powerpc/platforms/pasemi/misc.c +++ b/arch/powerpc/platforms/pasemi/misc.c @@ -36,8 +36,7 @@ static int __init find_i2c_driver(struct device_node *node, for (i = 0; i < ARRAY_SIZE(i2c_devices); i++) { if (!of_device_is_compatible(node, i2c_devices[i].of_device)) continue; - if (strlcpy(info->type, i2c_devices[i].i2c_type, - I2C_NAME_SIZE) >= I2C_NAME_SIZE) + if (strscpy(info->type, i2c_devices[i].i2c_type, I2C_NAME_SIZE) < 0) return -ENOMEM; return 0; } diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c index dc1846660005..166c97fff16d 100644 --- a/arch/powerpc/platforms/pasemi/msi.c +++ b/arch/powerpc/platforms/pasemi/msi.c @@ -66,6 +66,7 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) hwirq = virq_to_hw(entry->irq); irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); + entry->irq = 0; msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK); } } diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h index 3f277a200fd8..6f6743b8e48d 100644 --- a/arch/powerpc/platforms/pasemi/pasemi.h +++ b/arch/powerpc/platforms/pasemi/pasemi.h @@ -4,7 +4,7 @@ extern time64_t pas_get_boot_time(void); extern void pas_pci_init(void); -extern void pas_pci_irq_fixup(struct pci_dev *dev); +struct pci_dev; extern void pas_pci_dma_dev_setup(struct pci_dev 
*dev); void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset); diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c index 55f0160910bf..60f990a336c4 100644 --- a/arch/powerpc/platforms/pasemi/pci.c +++ b/arch/powerpc/platforms/pasemi/pci.c @@ -270,15 +270,10 @@ static int __init pas_add_bridge(struct device_node *dev) void __init pas_pci_init(void) { - struct device_node *np, *root; + struct device_node *root = of_find_node_by_path("/"); + struct device_node *np; int res; - root = of_find_node_by_path("/"); - if (!root) { - pr_crit("pas_pci_init: can't find root of device tree\n"); - return; - } - pci_set_flags(PCI_SCAN_ALL_PCIE_DEVS); np = of_find_compatible_node(root, NULL, "pasemi,rootbus"); @@ -286,6 +281,7 @@ void __init pas_pci_init(void) res = pas_add_bridge(np); of_node_put(np); } + of_node_put(root); } void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset) diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index 2aef49e04dd4..d03b41336901 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -16,7 +16,9 @@ #include <linux/console.h> #include <linux/export.h> #include <linux/pci.h> +#include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/gfp.h> #include <linux/irqdomain.h> @@ -62,7 +64,7 @@ static void __noreturn pas_restart(char *cmd) } #ifdef CONFIG_PPC_PASEMI_NEMO -void pas_shutdown(void) +static void pas_shutdown(void) { /* Set the PLD bit that makes the SB600 think the power button is being pressed */ void __iomem *pld_map = ioremap(0xf5000000,4096); @@ -226,7 +228,7 @@ static void __init nemo_init_IRQ(struct mpic *mpic) irq_set_chained_handler(gpio_virq, sb600_8259_cascade); mpic_unmask_irq(irq_get_irq_data(gpio_virq)); - irq_set_default_host(mpic->irqhost); + irq_set_default_domain(mpic->irqhost); } #else @@ -449,7 +451,6 @@ define_machine(pasemi) { .get_irq = mpic_get_irq, .restart = pas_restart, .get_boot_time = pas_get_boot_time, - .calibrate_decr = generic_calibrate_decr, .progress = pas_progress, .machine_check_exception = pas_machine_check_handler, }; diff --git a/arch/powerpc/platforms/pasemi/time.c b/arch/powerpc/platforms/pasemi/time.c index ad721882c8b6..70ac6db027d0 100644 --- a/arch/powerpc/platforms/pasemi/time.c +++ b/arch/powerpc/platforms/pasemi/time.c @@ -9,6 +9,8 @@ #include <asm/time.h> +#include "pasemi.h" + time64_t __init pas_get_boot_time(void) { /* Let's just return a fake date right now */ diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig index 130707ec9f99..84f101ec53a9 100644 --- a/arch/powerpc/platforms/powermac/Kconfig +++ b/arch/powerpc/platforms/powermac/Kconfig @@ -2,6 +2,7 @@ config PPC_PMAC bool "Apple PowerMac based machines" depends on PPC_BOOK3S && CPU_BIG_ENDIAN + select ADB_CUDA if POWER_RESET && ADB select MPIC select FORCE_PCI select PPC_INDIRECT_PCI if PPC32 diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c index aeb79a8b3e10..79741370c40c 100644 --- a/arch/powerpc/platforms/powermac/backlight.c +++ b/arch/powerpc/platforms/powermac/backlight.c @@ -9,7 +9,6 @@ */ #include <linux/kernel.h> -#include <linux/fb.h> #include <linux/backlight.h> #include <linux/adb.h> #include <linux/pmu.h> @@ -58,43 +57,10 @@ struct backlight_device *pmac_backlight; int pmac_has_backlight_type(const char *type) { struct device_node* bk_node = 
of_find_node_by_name(NULL, "backlight"); + int i = of_property_match_string(bk_node, "backlight-control", type); - if (bk_node) { - const char *prop = of_get_property(bk_node, - "backlight-control", NULL); - if (prop && strncmp(prop, type, strlen(type)) == 0) { - of_node_put(bk_node); - return 1; - } - of_node_put(bk_node); - } - - return 0; -} - -int pmac_backlight_curve_lookup(struct fb_info *info, int value) -{ - int level = (FB_BACKLIGHT_LEVELS - 1); - - if (info && info->bl_dev) { - int i, max = 0; - - /* Look for biggest value */ - for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) - max = max((int)info->bl_curve[i], max); - - /* Look for nearest value */ - for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) { - int diff = abs(info->bl_curve[i] - value); - if (diff < max) { - max = diff; - level = i; - } - } - - } - - return level; + of_node_put(bk_node); + return i >= 0; } static void pmac_backlight_key_worker(struct work_struct *work) diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index 5cc958adba13..2cc257f75c50 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c @@ -37,6 +37,8 @@ #include <asm/pci-bridge.h> #include <asm/pmac_low_i2c.h> +#include "pmac.h" + #undef DEBUG_FEATURE #ifdef DEBUG_FEATURE @@ -132,8 +134,10 @@ static struct pmac_mb_def pmac_mb; * Here are the chip specific feature functions */ -static inline int simple_feature_tweak(struct device_node *node, int type, - int reg, u32 mask, int value) +#ifndef CONFIG_PPC64 + +static int simple_feature_tweak(struct device_node *node, int type, int reg, + u32 mask, int value) { struct macio_chip* macio; unsigned long flags; @@ -152,8 +156,6 @@ static inline int simple_feature_tweak(struct device_node *node, int type, return 0; } -#ifndef CONFIG_PPC64 - static long ohare_htw_scc_enable(struct device_node *node, long param, long value) { @@ -1053,11 +1055,11 @@ core99_reset_cpu(struct device_node *node, long param, long value) return -ENODEV; for_each_of_cpu_node(np) { - const u32 *num = of_get_property(np, "reg", NULL); const u32 *rst = of_get_property(np, "soft-reset", NULL); - if (num == NULL || rst == NULL) + if (!rst) continue; - if (param == *num) { + if (param == of_get_cpu_hwid(np, 0)) { + of_node_put(np); reset_io = *rst; break; } @@ -1499,11 +1501,11 @@ static long g5_reset_cpu(struct device_node *node, long param, long value) return -ENODEV; for_each_of_cpu_node(np) { - const u32 *num = of_get_property(np, "reg", NULL); const u32 *rst = of_get_property(np, "soft-reset", NULL); - if (num == NULL || rst == NULL) + if (!rst) continue; - if (param == *num) { + if (param == of_get_cpu_hwid(np, 0)) { + of_node_put(np); reset_io = *rst; break; } @@ -2331,7 +2333,6 @@ static struct pmac_mb_def pmac_mb_defs[] = { PMAC_TYPE_POWERMAC_G5, g5_features, 0, }, -#ifdef CONFIG_PPC64 { "PowerMac7,3", "PowerMac G5", PMAC_TYPE_POWERMAC_G5, g5_features, 0, @@ -2357,7 +2358,6 @@ static struct pmac_mb_def pmac_mb_defs[] = { 0, }, #endif /* CONFIG_PPC64 */ -#endif /* CONFIG_PPC64 */ }; /* @@ -2506,7 +2506,7 @@ found: int cpu_count = 1; /* Nap mode not supported on SMP */ - if (of_get_property(np, "flush-on-lock", NULL) || + if (of_property_read_bool(np, "flush-on-lock") || (cpu_count > 1)) { powersave_nap = 0; of_node_put(np); @@ -2545,8 +2545,7 @@ done: */ static void __init probe_uninorth(void) { - const u32 *addrp; - phys_addr_t address; + struct resource res; unsigned long actrl; /* Locate core99 Uni-N */ @@ -2568,18 +2567,15 @@ static void __init 
probe_uninorth(void) return; } - addrp = of_get_property(uninorth_node, "reg", NULL); - if (addrp == NULL) - return; - address = of_translate_address(uninorth_node, addrp); - if (address == 0) + if (of_address_to_resource(uninorth_node, 0, &res)) return; - uninorth_base = ioremap(address, 0x40000); + + uninorth_base = ioremap(res.start, 0x40000); if (uninorth_base == NULL) return; uninorth_rev = in_be32(UN_REG(UNI_N_VERSION)); if (uninorth_maj == 3 || uninorth_maj == 4) { - u3_ht_base = ioremap(address + U3_HT_CONFIG_BASE, 0x1000); + u3_ht_base = ioremap(res.start + U3_HT_CONFIG_BASE, 0x1000); if (u3_ht_base == NULL) { iounmap(uninorth_base); return; @@ -2589,7 +2585,7 @@ static void __init probe_uninorth(void) printk(KERN_INFO "Found %s memory controller & host bridge" " @ 0x%08x revision: 0x%02x\n", uninorth_maj == 3 ? "U3" : uninorth_maj == 4 ? "U4" : "UniNorth", - (unsigned int)address, uninorth_rev); + (unsigned int)res.start, uninorth_rev); printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base); /* Set the arbitrer QAck delay according to what Apple does @@ -2616,7 +2612,8 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ struct device_node* node; int i; volatile u32 __iomem *base; - const u32 *addrp, *revp; + const __be32 *addrp; + const u32 *revp; phys_addr_t addr; u64 size; @@ -2632,31 +2629,31 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ if (!macio_chips[i].of_node) break; if (macio_chips[i].of_node == node) - return; + goto out_put; } if (i >= MAX_MACIO_CHIPS) { printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n"); printk(KERN_ERR "pmac_feature: %pOF skipped\n", node); - return; + goto out_put; } addrp = of_get_pci_address(node, 0, &size, NULL); if (addrp == NULL) { printk(KERN_ERR "pmac_feature: %pOF: can't find base !\n", node); - return; + goto out_put; } addr = of_translate_address(node, addrp); if (addr == 0) { printk(KERN_ERR "pmac_feature: %pOF, can't translate base !\n", node); - return; + goto out_put; } base = ioremap(addr, (unsigned long)size); if (!base) { printk(KERN_ERR "pmac_feature: %pOF, can't map mac-io chip !\n", node); - return; + goto out_put; } if (type == macio_keylargo || type == macio_keylargo2) { const u32 *did = of_get_property(node, "device-id", NULL); @@ -2677,6 +2674,11 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ macio_chips[i].rev = *revp; printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n", macio_names[type], macio_chips[i].rev, macio_chips[i].base); + + return; + +out_put: + of_node_put(node); } static int __init diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index c1c430c66dc9..02474e27df9b 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -347,7 +347,7 @@ static irqreturn_t kw_i2c_irq(int irq, void *dev_id) unsigned long flags; spin_lock_irqsave(&host->lock, flags); - del_timer(&host->timeout_timer); + timer_delete(&host->timeout_timer); kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); if (host->state != state_idle) { host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; @@ -359,7 +359,8 @@ static irqreturn_t kw_i2c_irq(int irq, void *dev_id) static void kw_i2c_timeout(struct timer_list *t) { - struct pmac_i2c_host_kw *host = from_timer(host, t, timeout_timer); + struct pmac_i2c_host_kw *host = timer_container_of(host, t, + timeout_timer); unsigned long flags; 
spin_lock_irqsave(&host->lock, flags); @@ -627,6 +628,7 @@ static void __init kw_i2c_probe(void) if (parent == NULL) continue; chans = parent->name[0] == 'u' ? 2 : 1; + of_node_put(parent); for (i = 0; i < chans; i++) kw_i2c_add(host, np, np, i); } else { @@ -924,8 +926,10 @@ static void __init smu_i2c_probe(void) sz = sizeof(struct pmac_i2c_bus) + sizeof(struct smu_i2c_cmd); bus = kzalloc(sz, GFP_KERNEL); - if (bus == NULL) + if (bus == NULL) { + of_node_put(busnode); return; + } bus->controller = controller; bus->busnode = of_node_get(busnode); diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index fe2e0249cbc2..a112d26185a0 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c @@ -514,10 +514,7 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr) printk(KERN_ERR "nvram: no address\n"); return -EINVAL; } - nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES); - if (!nvram_image) - panic("%s: Failed to allocate %u bytes\n", __func__, - NVRAM_SIZE); + nvram_image = memblock_alloc_or_panic(NVRAM_SIZE, SMP_CACHE_BYTES); nvram_data = ioremap(addr, NVRAM_SIZE*2); nvram_naddrs = 1; /* Make sure we get the correct case */ diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index 9c2947a3edd5..8253de737373 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c @@ -136,6 +136,8 @@ static void __init macio_gpio_init_one(struct macio_chip *macio) for_each_child_of_node(gparent, gp) pmf_do_functions(gp, NULL, 0, PMF_FLAGS_ON_INIT, NULL); + of_node_put(gparent); + /* Note: We do not at this point implement the "at sleep" or "at wake" * functions. I yet to find any for GPIOs anyway */ @@ -311,7 +313,7 @@ static void __init uninorth_install_pfunc(void) /* * Install handlers for the hwclock child if any */ - for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;) + for_each_child_of_node(uninorth_node, np) if (of_node_name_eq(np, "hw-clock")) { unin_hwclock = np; break; diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 8c8d8e0a7d13..c37783a03d25 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -2,7 +2,7 @@ /* * Support for the interrupt controllers found on Power Macintosh, * currently Apple's "Grand Central" interrupt controller in all - * it's incarnations. OpenPIC support used on newer machines is + * its incarnations. 
OpenPIC support used on newer machines is * in a separate file * * Copyright (C) 1997 Paul Mackerras (paulus@samba.org) @@ -250,7 +250,7 @@ static unsigned int pmac_pic_get_irq(void) raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); if (unlikely(irq < 0)) return 0; - return irq_linear_revmap(pmac_pic_host, irq); + return irq_find_mapping(pmac_pic_host, irq); } static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node, @@ -327,10 +327,11 @@ static void __init pmac_pic_probe_oldstyle(void) /* * Allocate an irq host */ - pmac_pic_host = irq_domain_add_linear(master, max_irqs, - &pmac_pic_host_ops, NULL); + pmac_pic_host = irq_domain_create_linear(of_fwnode_handle(master), + max_irqs, + &pmac_pic_host_ops, NULL); BUG_ON(pmac_pic_host == NULL); - irq_set_default_host(pmac_pic_host); + irq_set_default_domain(pmac_pic_host); /* Get addresses of first controller if we have a node for it */ BUG_ON(of_address_to_resource(master, 0, &r)); @@ -450,7 +451,7 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); - if (of_get_property(np, "big-endian", NULL)) + if (of_property_read_bool(np, "big-endian")) flags |= MPIC_BIG_ENDIAN; /* Primary Big Endian means HT interrupts. This is quite dodgy @@ -475,8 +476,7 @@ static int __init pmac_pic_probe_mpic(void) /* We can have up to 2 MPICs cascaded */ for_each_node_by_type(np, "open-pic") { - if (master == NULL && - of_get_property(np, "interrupts", NULL) == NULL) + if (master == NULL && !of_property_present(np, "interrupts")) master = of_node_get(np); else if (slave == NULL) slave = of_node_get(np); @@ -528,7 +528,7 @@ void __init pmac_pic_init(void) #ifdef CONFIG_PPC32 if (!pmac_newworld) of_irq_workarounds |= OF_IMAP_OLDWORLD_MAC; - if (of_get_property(of_chosen, "linux,bootx", NULL) != NULL) + if (of_property_read_bool(of_chosen, "linux,bootx")) of_irq_workarounds |= OF_IMAP_NO_PHANDLE; /* If we don't have phandles on a newworld, then try to locate a diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index f71735ec449f..e119ced05d10 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -45,7 +45,8 @@ #include <linux/root_dev.h> #include <linux/bitops.h> #include <linux/suspend.h> -#include <linux/of_device.h> +#include <linux/string_choices.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <asm/reg.h> @@ -70,15 +71,14 @@ #undef SHOW_GATWICK_IRQS -int ppc_override_l2cr = 0; -int ppc_override_l2cr_value; -int has_l2cache = 0; +static int has_l2cache; int pmac_newworld; static int current_root_goodness = -1; -#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */ +/* sda1 - slightly silly choice */ +#define DEFAULT_ROOT_DEVICE MKDEV(SCSI_DISK0_MAJOR, 1) sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN; EXPORT_SYMBOL(sys_ctrler); @@ -139,7 +139,7 @@ static void pmac_show_cpuinfo(struct seq_file *m) of_get_property(np, "d-cache-size", NULL); seq_printf(m, "L2 cache\t:"); has_l2cache = 1; - if (of_get_property(np, "cache-unified", NULL) && dc) { + if (of_property_read_bool(np, "cache-unified") && dc) { seq_printf(m, " %dK unified", *dc / 1024); } else { if (ic) @@ -236,22 +236,15 @@ static void __init l2cr_init(void) const unsigned int *l2cr = of_get_property(np, "l2cr-value", NULL); if (l2cr) { - ppc_override_l2cr = 1; - ppc_override_l2cr_value = *l2cr; _set_L2CR(0); - _set_L2CR(ppc_override_l2cr_value); + _set_L2CR(*l2cr); + pr_info("L2CR overridden 
(0x%x), backside cache is %s\n", + *l2cr, str_enabled_disabled((*l2cr) & 0x80000000)); } of_node_put(np); break; } } - - if (ppc_override_l2cr) - printk(KERN_INFO "L2CR overridden (0x%x), " - "backside cache is %s\n", - ppc_override_l2cr_value, - (ppc_override_l2cr_value & 0x80000000) - ? "enabled" : "disabled"); } #endif @@ -320,13 +313,6 @@ static void __init pmac_setup_arch(void) #endif /* CONFIG_ADB */ } -#ifdef CONFIG_SCSI -void note_scsi_host(struct device_node *node, void *host) -{ -} -EXPORT_SYMBOL(note_scsi_host); -#endif - static int initializing = 1; static int pmac_late_init(void) diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S index d497a60003d2..822ed70cdcbf 100644 --- a/arch/powerpc/platforms/powermac/sleep.S +++ b/arch/powerpc/platforms/powermac/sleep.S @@ -176,7 +176,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) * memory location containing the PC to resume from * at address 0. * - On Core99, we must store the wakeup vector at - * address 0x80 and eventually it's parameters + * address 0x80 and eventually its parameters * at address 0x84. I've have some trouble with those * parameters however and I no longer use them. */ diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index d9df45741ece..88e92af8acf9 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -35,7 +35,7 @@ #include <asm/ptrace.h> #include <linux/atomic.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/irq.h> #include <asm/page.h> #include <asm/sections.h> @@ -190,7 +190,7 @@ static int __init psurge_secondary_ipi_init(void) { int rc = -ENOMEM; - psurge_host = irq_domain_add_nomap(NULL, ~0, &psurge_host_ops, NULL); + psurge_host = irq_domain_create_nomap(NULL, ~0, &psurge_host_ops, NULL); if (psurge_host) psurge_secondary_virq = irq_create_direct_mapping(psurge_host); @@ -413,7 +413,7 @@ static void __init smp_psurge_setup_cpu(int cpu_nr) printk(KERN_ERR "Couldn't get primary IPI interrupt"); } -void __init smp_psurge_take_timebase(void) +static void __init smp_psurge_take_timebase(void) { if (psurge_type != PSURGE_DUAL) return; @@ -429,7 +429,7 @@ void __init smp_psurge_take_timebase(void) set_dec(tb_ticks_per_jiffy/2); } -void __init smp_psurge_give_timebase(void) +static void __init smp_psurge_give_timebase(void) { /* Nothing to do here */ } @@ -598,8 +598,10 @@ static void __init smp_core99_setup_i2c_hwsync(int ncpus) name = "Pulsar"; break; } - if (pmac_tb_freeze != NULL) + if (pmac_tb_freeze != NULL) { + of_node_put(cc); break; + } } if (pmac_tb_freeze != NULL) { /* Open i2c bus for synchronous access */ @@ -706,11 +708,12 @@ static void __init smp_core99_setup(int ncpus) struct device_node *cpus = of_find_node_by_path("/cpus"); if (cpus && - of_get_property(cpus, "platform-cpu-timebase", NULL)) { + of_property_read_bool(cpus, "platform-cpu-timebase")) { pmac_tb_freeze = smp_core99_pfunc_tb_freeze; printk(KERN_INFO "Processor timebase sync using" " platform function\n"); } + of_node_put(cpus); } #else /* CONFIG_PPC64 */ @@ -824,7 +827,7 @@ static int smp_core99_kick_cpu(int nr) mdelay(1); /* Restore our exception vector */ - patch_instruction(vector, ppc_inst(save_vector)); + patch_uint(vector, save_vector); local_irq_restore(flags); if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index 4c5790aff1b5..b4426a35aca3 100644 
--- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/time.h> @@ -26,8 +27,8 @@ #include <linux/rtc.h> #include <linux/of_address.h> +#include <asm/early_ioremap.h> #include <asm/sections.h> -#include <asm/io.h> #include <asm/machdep.h> #include <asm/time.h> #include <asm/nvram.h> @@ -77,7 +78,7 @@ long __init pmac_time_init(void) delta |= 0xFF000000UL; dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0); printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60, - dst ? "on" : "off"); + str_on_off(dst)); #endif return delta; } @@ -182,7 +183,7 @@ static int __init via_calibrate_decr(void) return 0; } of_node_put(vias); - via = ioremap(rsrc.start, resource_size(&rsrc)); + via = early_ioremap(rsrc.start, resource_size(&rsrc)); if (via == NULL) { printk(KERN_ERR "Failed to map VIA for timer calibration !\n"); return 0; @@ -207,7 +208,7 @@ static int __init via_calibrate_decr(void) ppc_tb_freq = (dstart - dend) * 100 / 6; - iounmap(via); + early_iounmap((void *)via, resource_size(&rsrc)); return 1; } diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c index 734df5a32f99..1b7c39e841ee 100644 --- a/arch/powerpc/platforms/powermac/udbg_scc.c +++ b/arch/powerpc/platforms/powermac/udbg_scc.c @@ -81,10 +81,14 @@ void __init udbg_scc_init(int force_scc) if (path != NULL) stdout = of_find_node_by_path(path); for_each_child_of_node(escc, ch) { - if (ch == stdout) + if (ch == stdout) { + of_node_put(ch_def); ch_def = of_node_get(ch); - if (of_node_name_eq(ch, "ch-a")) + } + if (of_node_name_eq(ch, "ch-a")) { + of_node_put(ch_a); ch_a = of_node_get(ch); + } } if (ch_def == NULL && !force_scc) goto bail; diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig index e1a05c5a9004..95d7ba73d43d 100644 --- a/arch/powerpc/platforms/powernv/Kconfig +++ b/arch/powerpc/platforms/powernv/Kconfig @@ -16,10 +16,12 @@ config PPC_POWERNV select PPC_DOORBELL select MMU_NOTIFIER select FORCE_SMP + select ARCH_SUPPORTS_PER_VMA_LOCK + select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU default y config OPAL_PRD - tristate 'OPAL PRD driver' + tristate "OPAL PRD driver" depends on PPC_POWERNV help This enables the opal-prd driver, a facility to run processor diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index 19f0fc5c6f1b..9e5d0c847ee2 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -21,7 +21,6 @@ obj-$(CONFIG_PRESERVE_FA_DUMP) += opal-fadump.o obj-$(CONFIG_OPAL_CORE) += opal-core.o obj-$(CONFIG_PCI) += pci.o pci-ioda.o pci-ioda-tce.o obj-$(CONFIG_PCI_IOV) += pci-sriov.o -obj-$(CONFIG_CXL_BASE) += pci-cxl.o obj-$(CONFIG_EEH) += eeh-powernv.o obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o obj-$(CONFIG_OPAL_PRD) += opal-prd.o diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index a83cb679dd59..db3370d1673c 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -99,7 +99,6 @@ static ssize_t pnv_eeh_ei_write(struct file *filp, static const struct file_operations pnv_eeh_ei_fops = { .open = simple_open, - .llseek = no_llseek, .write = pnv_eeh_ei_write, }; @@ -855,13 +854,12 @@ 
static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option) struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; struct device_node *dn = pci_device_to_OF_node(pdev); - uint64_t id = PCI_SLOT_ID(phb->opal_id, - (pdev->bus->number << 8) | pdev->devfn); + uint64_t id = PCI_SLOT_ID(phb->opal_id, pci_dev_id(pdev)); uint8_t scope; int64_t rc; /* Hot reset to the bus if firmware cannot handle */ - if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL)) + if (!dn || !of_property_present(dn, "ibm,reset-by-firmware")) return __pnv_eeh_bridge_reset(pdev, option); pr_debug("%s: FW reset PCI bus %04x:%02x with option %d\n", diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 6f94b808dd39..d98b933e4984 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -18,7 +18,7 @@ #include <asm/opal.h> #include <asm/cputhreads.h> #include <asm/cpuidle.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/smp.h> #include <asm/runlatch.h> #include <asm/dbell.h> @@ -246,9 +246,9 @@ static inline void atomic_lock_thread_idle(void) { int cpu = raw_smp_processor_id(); int first = cpu_first_thread_sibling(cpu); - unsigned long *state = &paca_ptrs[first]->idle_state; + unsigned long *lock = &paca_ptrs[first]->idle_lock; - while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, state))) + while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, lock))) barrier(); } @@ -258,29 +258,31 @@ static inline void atomic_unlock_and_stop_thread_idle(void) int first = cpu_first_thread_sibling(cpu); unsigned long thread = 1UL << cpu_thread_in_core(cpu); unsigned long *state = &paca_ptrs[first]->idle_state; + unsigned long *lock = &paca_ptrs[first]->idle_lock; u64 s = READ_ONCE(*state); u64 new, tmp; - BUG_ON(!(s & PNV_CORE_IDLE_LOCK_BIT)); + BUG_ON(!(READ_ONCE(*lock) & PNV_CORE_IDLE_LOCK_BIT)); BUG_ON(s & thread); again: - new = (s | thread) & ~PNV_CORE_IDLE_LOCK_BIT; + new = s | thread; tmp = cmpxchg(state, s, new); if (unlikely(tmp != s)) { s = tmp; goto again; } + clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, lock); } static inline void atomic_unlock_thread_idle(void) { int cpu = raw_smp_processor_id(); int first = cpu_first_thread_sibling(cpu); - unsigned long *state = &paca_ptrs[first]->idle_state; + unsigned long *lock = &paca_ptrs[first]->idle_lock; - BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, state)); - clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, state); + BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, lock)); + clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, lock); } /* P7 and P8 */ @@ -1411,7 +1413,7 @@ static int __init pnv_parse_cpuidle_dt(void) goto out; } for (i = 0; i < nr_idle_states; i++) - strlcpy(pnv_idle_states[i].name, temp_string[i], + strscpy(pnv_idle_states[i].name, temp_string[i], PNV_IDLE_NAME_LEN); nr_pnv_idle_states = nr_idle_states; rc = 0; @@ -1419,6 +1421,7 @@ out: kfree(temp_u32); kfree(temp_u64); kfree(temp_string); + of_node_put(np); return rc; } @@ -1463,14 +1466,19 @@ static int __init pnv_init_idle_states(void) power7_fastsleep_workaround_entry = false; power7_fastsleep_workaround_exit = false; } else { + struct device *dev_root; /* * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that * workaround is needed to use fastsleep. Provide sysfs * control to choose how this workaround has to be * applied. 
*/ - device_create_file(cpu_subsys.dev_root, - &dev_attr_fastsleep_workaround_applyonce); + dev_root = bus_get_dev_root(&cpu_subsys); + if (dev_root) { + device_create_file(dev_root, + &dev_attr_fastsleep_workaround_applyonce); + put_device(dev_root); + } } update_subcore_sibling_mask(); diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 877720c64515..2ea30b343354 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -48,11 +48,15 @@ static ssize_t memtrace_read(struct file *filp, char __user *ubuf, static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma) { struct memtrace_entry *ent = filp->private_data; + unsigned long ent_nrpages = ent->size >> PAGE_SHIFT; + unsigned long vma_nrpages = vma_pages(vma); - if (ent->size < vma->vm_end - vma->vm_start) + /* The requested page offset should be within object's page count */ + if (vma->vm_pgoff >= ent_nrpages) return -EINVAL; - if (vma->vm_pgoff << PAGE_SHIFT >= ent->size) + /* The requested mapping range should remain within the bounds */ + if (vma_nrpages > ent_nrpages - vma->vm_pgoff) return -EINVAL; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -88,26 +92,6 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, } } -static void memtrace_clear_range(unsigned long start_pfn, - unsigned long nr_pages) -{ - unsigned long pfn; - - /* As HIGHMEM does not apply, use clear_page() directly. */ - for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) { - if (IS_ALIGNED(pfn, PAGES_PER_SECTION)) - cond_resched(); - clear_page(__va(PFN_PHYS(pfn))); - } - /* - * Before we go ahead and use this range as cache inhibited range - * flush the cache. - */ - flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn), - (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), - FLUSH_CHUNK_SIZE); -} - static u64 memtrace_alloc_node(u32 nid, u64 size) { const unsigned long nr_pages = PHYS_PFN(size); @@ -119,17 +103,18 @@ static u64 memtrace_alloc_node(u32 nid, u64 size) * by alloc_contig_pages(). */ page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE | - __GFP_NOWARN, nid, NULL); + __GFP_NOWARN | __GFP_ZERO, nid, NULL); if (!page) return 0; start_pfn = page_to_pfn(page); /* - * Clear the range while we still have a linear mapping. - * - * TODO: use __GFP_ZERO with alloc_contig_pages() once supported. + * Before we go ahead and use this range as cache inhibited range + * flush the cache. 
*/ - memtrace_clear_range(start_pfn, nr_pages); + flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn), + (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), + FLUSH_CHUNK_SIZE); /* * Set pages PageOffline(), to indicate that nobody (e.g., hibernation, diff --git a/arch/powerpc/platforms/powernv/ocxl.c b/arch/powerpc/platforms/powernv/ocxl.c index 27c936075031..64a9c7125c29 100644 --- a/arch/powerpc/platforms/powernv/ocxl.c +++ b/arch/powerpc/platforms/powernv/ocxl.c @@ -449,7 +449,7 @@ int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask, if (!data) return -ENOMEM; - bdfn = (dev->bus->number << 8) | dev->devfn; + bdfn = pci_dev_id(dev); rc = opal_npu_spa_setup(phb->opal_id, bdfn, virt_to_phys(spa_mem), PE_mask); if (rc) { @@ -478,10 +478,8 @@ EXPORT_SYMBOL_GPL(pnv_ocxl_spa_release); int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle) { struct spa_data *data = (struct spa_data *) platform_data; - int rc; - rc = opal_npu_spa_clear_cache(data->phb_opal_id, data->bdfn, pe_handle); - return rc; + return opal_npu_spa_clear_cache(data->phb_opal_id, data->bdfn, pe_handle); } EXPORT_SYMBOL_GPL(pnv_ocxl_spa_remove_pe_from_cache); diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c index f812c74c61e5..021b0ec29e24 100644 --- a/arch/powerpc/platforms/powernv/opal-call.c +++ b/arch/powerpc/platforms/powernv/opal-call.c @@ -167,8 +167,6 @@ OPAL_CALL(opal_pci_map_pe_mmio_window, OPAL_PCI_MAP_PE_MMIO_WINDOW); OPAL_CALL(opal_pci_set_phb_table_memory, OPAL_PCI_SET_PHB_TABLE_MEMORY); OPAL_CALL(opal_pci_set_pe, OPAL_PCI_SET_PE); OPAL_CALL(opal_pci_set_peltv, OPAL_PCI_SET_PELTV); -OPAL_CALL(opal_pci_set_mve, OPAL_PCI_SET_MVE); -OPAL_CALL(opal_pci_set_mve_enable, OPAL_PCI_SET_MVE_ENABLE); OPAL_CALL(opal_pci_get_xive_reissue, OPAL_PCI_GET_XIVE_REISSUE); OPAL_CALL(opal_pci_set_xive_reissue, OPAL_PCI_SET_XIVE_REISSUE); OPAL_CALL(opal_pci_set_xive_pe, OPAL_PCI_SET_XIVE_PE); diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c index adcb1a1a2bfe..e652da8f986f 100644 --- a/arch/powerpc/platforms/powernv/opal-core.c +++ b/arch/powerpc/platforms/powernv/opal-core.c @@ -16,7 +16,7 @@ #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/slab.h> -#include <linux/crash_core.h> +#include <linux/vmcore_info.h> #include <linux/of.h> #include <asm/page.h> @@ -149,7 +149,7 @@ static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf, /* end of vector */ bufp[idx++] = cpu_to_be64(AT_NULL); - buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_AUXV, + buf = append_elf64_note(buf, NN_AUXV, NT_AUXV, oc_conf->auxv_buf, AUXV_DESC_SZ); return buf; } @@ -159,7 +159,7 @@ static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf, * Returns number of bytes read on success, -errno on failure. */ static ssize_t read_opalcore(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, char *to, + const struct bin_attribute *bin_attr, char *to, loff_t pos, size_t count) { struct opalcore *m; @@ -206,9 +206,9 @@ static ssize_t read_opalcore(struct file *file, struct kobject *kobj, return (tpos - pos); } -static struct bin_attribute opal_core_attr = { +static struct bin_attribute opal_core_attr __ro_after_init = { .attr = {.name = "core", .mode = 0400}, - .read = read_opalcore + .read_new = read_opalcore }; /* @@ -252,7 +252,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf) * crashing CPU's prstatus. 
*/ first_cpu_note = buf; - buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS, + buf = append_elf64_note(buf, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); for (i = 0; i < oc_conf->num_cpus; i++, bufp += size_per_thread) { @@ -279,7 +279,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf) fill_prstatus(&prstatus, thread_pir, &regs); if (thread_pir != oc_conf->crashing_cpu) { - buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, + buf = append_elf64_note(buf, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); } else { @@ -287,7 +287,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf) * Add crashing CPU as the first NT_PRSTATUS note for * GDB to process the core file appropriately. */ - append_elf64_note(first_cpu_note, CRASH_CORE_NOTE_NAME, + append_elf64_note(first_cpu_note, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); } @@ -348,6 +348,8 @@ static int __init create_opalcore(void) if (!dn || ret) pr_warn("WARNING: Failed to read OPAL base & entry values\n"); + of_node_put(dn); + /* Use count to keep track of the program headers */ count = 0; @@ -597,7 +599,7 @@ static struct attribute *mpipl_attr[] = { NULL, }; -static struct bin_attribute *mpipl_bin_attr[] = { +static const struct bin_attribute *const mpipl_bin_attr[] = { &opal_core_attr, NULL, @@ -605,7 +607,7 @@ static struct bin_attribute *mpipl_bin_attr[] = { static const struct attribute_group mpipl_group = { .attrs = mpipl_attr, - .bin_attrs = mpipl_bin_attr, + .bin_attrs_new = mpipl_bin_attr, }; static int __init opalcore_init(void) diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c index 16c5860f1372..27e25693cf39 100644 --- a/arch/powerpc/platforms/powernv/opal-dump.c +++ b/arch/powerpc/platforms/powernv/opal-dump.c @@ -210,7 +210,7 @@ static struct attribute *dump_default_attrs[] = { }; ATTRIBUTE_GROUPS(dump_default); -static struct kobj_type dump_ktype = { +static const struct kobj_type dump_ktype = { .sysfs_ops = &dump_sysfs_ops, .release = &dump_release, .default_groups = dump_default_groups, @@ -286,7 +286,7 @@ out: } static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { ssize_t rc; @@ -342,7 +342,7 @@ static void create_dump_obj(uint32_t id, size_t size, uint32_t type) dump->dump_attr.attr.name = "dump"; dump->dump_attr.attr.mode = 0400; dump->dump_attr.size = size; - dump->dump_attr.read = dump_attr_read; + dump->dump_attr.read_new = dump_attr_read; dump->id = id; dump->size = size; diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c index 554fdd7f88b8..de33f354e9fd 100644 --- a/arch/powerpc/platforms/powernv/opal-elog.c +++ b/arch/powerpc/platforms/powernv/opal-elog.c @@ -146,7 +146,7 @@ static struct attribute *elog_default_attrs[] = { }; ATTRIBUTE_GROUPS(elog_default); -static struct kobj_type elog_ktype = { +static const struct kobj_type elog_ktype = { .sysfs_ops = &elog_sysfs_ops, .release = &elog_release, .default_groups = elog_default_groups, @@ -156,7 +156,7 @@ static struct kobj_type elog_ktype = { #define OPAL_MAX_ERRLOG_SIZE 16384 static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { int opal_rc; @@ -203,7 +203,7 @@ static void create_elog_obj(uint64_t id, size_t size, 
uint64_t type) elog->raw_attr.attr.name = "raw"; elog->raw_attr.attr.mode = 0400; elog->raw_attr.size = size; - elog->raw_attr.read = raw_attr_read; + elog->raw_attr.read_new = raw_attr_read; elog->id = id; elog->size = size; diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c index 964f464b1b0e..c9c1dfb35464 100644 --- a/arch/powerpc/platforms/powernv/opal-fadump.c +++ b/arch/powerpc/platforms/powernv/opal-fadump.c @@ -513,8 +513,8 @@ out: final_note(note_buf); pr_debug("Updating elfcore header (%llx) with cpu notes\n", - fdh->elfcorehdr_addr); - fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr)); + fadump_conf->elfcorehdr_addr); + fadump_update_elfcore_header((char *)fadump_conf->elfcorehdr_addr); return 0; } @@ -526,12 +526,7 @@ static int __init opal_fadump_process(struct fw_dump *fadump_conf) if (!opal_fdm_active || !fadump_conf->fadumphdr_addr) return rc; - /* Validate the fadump crash info header */ fdh = __va(fadump_conf->fadumphdr_addr); - if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) { - pr_err("Crash info header is not valid.\n"); - return rc; - } #ifdef CONFIG_OPAL_CORE /* @@ -545,18 +540,7 @@ static int __init opal_fadump_process(struct fw_dump *fadump_conf) kernel_initiated = true; #endif - rc = opal_fadump_build_cpu_notes(fadump_conf, fdh); - if (rc) - return rc; - - /* - * We are done validating dump info and elfcore header is now ready - * to be exported. set elfcorehdr_addr so that vmcore module will - * export the elfcore header through '/proc/vmcore'. - */ - elfcorehdr_addr = fdh->elfcorehdr_addr; - - return rc; + return opal_fadump_build_cpu_notes(fadump_conf, fdh); } static void opal_fadump_region_show(struct fw_dump *fadump_conf, @@ -615,6 +599,12 @@ static void opal_fadump_trigger(struct fadump_crash_info_header *fdh, pr_emerg("No backend support for MPIPL!\n"); } +/* FADUMP_MAX_MEM_REGS or lower */ +static int opal_fadump_max_boot_mem_rgns(void) +{ + return FADUMP_MAX_MEM_REGS; +} + static struct fadump_ops opal_fadump_ops = { .fadump_init_mem_struct = opal_fadump_init_mem_struct, .fadump_get_metadata_size = opal_fadump_get_metadata_size, @@ -627,6 +617,7 @@ static struct fadump_ops opal_fadump_ops = { .fadump_process = opal_fadump_process, .fadump_region_show = opal_fadump_region_show, .fadump_trigger = opal_fadump_trigger, + .fadump_max_boot_mem_rgns = opal_fadump_max_boot_mem_rgns, }; void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) @@ -674,8 +665,10 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) } } - fadump_conf->ops = &opal_fadump_ops; - fadump_conf->fadump_supported = 1; + fadump_conf->ops = &opal_fadump_ops; + fadump_conf->fadump_supported = 1; + /* TODO: Add support to pass additional parameters */ + fadump_conf->param_area_supported = 0; /* * Firmware supports 32-bit field for size. Align it to PAGE_SIZE diff --git a/arch/powerpc/platforms/powernv/opal-fadump.h b/arch/powerpc/platforms/powernv/opal-fadump.h index 3f715efb0aa6..5eeb794b5eb1 100644 --- a/arch/powerpc/platforms/powernv/opal-fadump.h +++ b/arch/powerpc/platforms/powernv/opal-fadump.h @@ -135,7 +135,7 @@ static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt, for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) { reg_entry = (struct hdat_fadump_reg_entry *)bufp; val = (cpu_endian ? 
be64_to_cpu(reg_entry->reg_val) : - (u64)(reg_entry->reg_val)); + (u64 __force)(reg_entry->reg_val)); opal_fadump_set_regval_regnum(regs, be32_to_cpu(reg_entry->reg_type), be32_to_cpu(reg_entry->reg_num), diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c index d5ea04e8e4c5..fd8c8621e973 100644 --- a/arch/powerpc/platforms/powernv/opal-flash.c +++ b/arch/powerpc/platforms/powernv/opal-flash.c @@ -432,7 +432,7 @@ static int alloc_image_buf(char *buffer, size_t count) * and pre-allocate required memory. */ static ssize_t image_data_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { int rc; @@ -493,7 +493,7 @@ out: static const struct bin_attribute image_data_attr = { .attr = {.name = "image", .mode = 0200}, .size = MAX_IMAGE_SIZE, /* Limit image size */ - .write = image_data_write, + .write_new = image_data_write, }; static struct kobj_attribute validate_attribute = diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index 348a8cdaecd6..828fc4d88471 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -11,7 +11,6 @@ #include <linux/platform_device.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <linux/crash_dump.h> #include <linux/debugfs.h> #include <asm/opal.h> diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index d55652b5f6fa..e180bd8e1400 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c @@ -59,7 +59,7 @@ again: cond_resched(); } - last_outstanding_events = 0; + WRITE_ONCE(last_outstanding_events, 0); if (opal_poll_events(&events) != OPAL_SUCCESS) return; e = be64_to_cpu(events) & opal_event_irqchip.mask; @@ -69,7 +69,7 @@ again: bool opal_have_pending_events(void) { - if (last_outstanding_events & opal_event_irqchip.mask) + if (READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask) return true; return false; } @@ -124,7 +124,7 @@ static irqreturn_t opal_interrupt(int irq, void *data) __be64 events; opal_handle_interrupt(virq_to_hw(irq), &events); - last_outstanding_events = be64_to_cpu(events); + WRITE_ONCE(last_outstanding_events, be64_to_cpu(events)); if (opal_have_pending_events()) opal_wake_poller(); @@ -191,7 +191,8 @@ int __init opal_event_init(void) * fall back to the legacy method (opal_event_request(...)) * anyway. 
*/ dn = of_find_compatible_node(NULL, NULL, "ibm,opal-event"); - opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS, + opal_event_irqchip.domain = irq_domain_create_linear(of_fwnode_handle(dn), + MAX_NUM_EVENTS, &opal_event_domain_ops, &opal_event_irqchip); of_node_put(dn); if (!opal_event_irqchip.domain) { @@ -275,11 +276,14 @@ int __init opal_event_init(void) else name = kasprintf(GFP_KERNEL, "opal"); + if (!name) + continue; /* Install interrupt handler */ rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK, name, NULL); if (rc) { pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start); + kfree(name); continue; } } diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c index 6c3bc4b4da98..bb4218fa796e 100644 --- a/arch/powerpc/platforms/powernv/opal-kmsg.c +++ b/arch/powerpc/platforms/powernv/opal-kmsg.c @@ -20,13 +20,13 @@ * message, it just ensures that OPAL completely flushes the console buffer. */ static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper, - enum kmsg_dump_reason reason) + struct kmsg_dump_detail *detail) { /* * Outside of a panic context the pollers will continue to run, * so we don't need to do any special flushing. */ - if (reason != KMSG_DUMP_PANIC) + if (detail->reason != KMSG_DUMP_PANIC) return; opal_flush_console(0); diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index d129d6d45a50..8a7f39e106bd 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -393,7 +393,7 @@ void __init opal_lpc_init(void) for_each_compatible_node(np, NULL, "ibm,power8-lpc") { if (!of_device_is_available(np)) continue; - if (!of_get_property(np, "primary", NULL)) + if (!of_property_present(np, "primary")) continue; opal_lpc_chip_id = of_get_ibm_chip_id(np); of_node_put(np); @@ -403,7 +403,7 @@ void __init opal_lpc_init(void) return; /* Does it support direct mapping ? 
*/ - if (of_get_property(np, "ranges", NULL)) { + if (of_property_present(np, "ranges")) { pr_info("OPAL: Found memory mapped LPC bus on chip %d\n", opal_lpc_chip_id); isa_bridge_init_non_pci(np); diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c index 22d6efe17b0d..f1988d0ab45c 100644 --- a/arch/powerpc/platforms/powernv/opal-msglog.c +++ b/arch/powerpc/platforms/powernv/opal-msglog.c @@ -94,15 +94,15 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count) } static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, char *to, + const struct bin_attribute *bin_attr, char *to, loff_t pos, size_t count) { return opal_msglog_copy(to, pos, count); } -static struct bin_attribute opal_msglog_attr = { +static struct bin_attribute opal_msglog_attr __ro_after_init = { .attr = {.name = "msglog", .mode = 0400}, - .read = opal_msglog_read + .read_new = opal_msglog_read }; struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name) diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c index 64506b46e77b..ea917266aa17 100644 --- a/arch/powerpc/platforms/powernv/opal-powercap.c +++ b/arch/powerpc/platforms/powernv/opal-powercap.c @@ -153,7 +153,7 @@ void __init opal_powercap_init(void) pcaps = kcalloc(of_get_child_count(powercap), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return; + goto out_put_powercap; powercap_kobj = kobject_create_and_add("powercap", opal_kobj); if (!powercap_kobj) { @@ -196,6 +196,12 @@ void __init opal_powercap_init(void) j = 0; pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node); + if (!pcaps[i].pg.name) { + kfree(pcaps[i].pattrs); + kfree(pcaps[i].pg.attrs); + goto out_pcaps_pattrs; + } + if (has_min) { powercap_add_attr(min, "powercap-min", &pcaps[i].pattrs[j]); @@ -226,6 +232,7 @@ void __init opal_powercap_init(void) } i++; } + of_node_put(powercap); return; @@ -236,6 +243,9 @@ out_pcaps_pattrs: kfree(pcaps[i].pg.name); } kobject_put(powercap_kobj); + of_node_put(node); out_pcaps: kfree(pcaps); +out_put_powercap: + of_node_put(powercap); } diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index 113bdb151f68..dc246ed4b7b4 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c @@ -24,13 +24,20 @@ #include <linux/uaccess.h> +struct opal_prd_msg { + union { + struct opal_prd_msg_header header; + DECLARE_FLEX_ARRAY(u8, data); + }; +}; + /* * The msg member must be at the end of the struct, as it's followed by the * message data. 
*/ struct opal_prd_msg_queue_item { - struct list_head list; - struct opal_prd_msg_header msg; + struct list_head list; + struct opal_prd_msg msg; }; static struct device_node *prd_node; @@ -59,6 +66,8 @@ static bool opal_prd_range_is_valid(uint64_t addr, uint64_t size) const char *label; addrp = of_get_address(node, 0, &range_size, NULL); + if (!addrp) + continue; range_addr = of_read_number(addrp, 2); range_end = range_addr + range_size; @@ -156,7 +165,7 @@ static ssize_t opal_prd_read(struct file *file, char __user *buf, int rc; /* we need at least a header's worth of data */ - if (count < sizeof(item->msg)) + if (count < sizeof(item->msg.header)) return -EINVAL; if (*ppos) @@ -186,7 +195,7 @@ static ssize_t opal_prd_read(struct file *file, char __user *buf, return -EINTR; } - size = be16_to_cpu(item->msg.size); + size = be16_to_cpu(item->msg.header.size); if (size > count) { err = -EINVAL; goto err_requeue; @@ -214,8 +223,8 @@ static ssize_t opal_prd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct opal_prd_msg_header hdr; + struct opal_prd_msg *msg; ssize_t size; - void *msg; int rc; size = sizeof(hdr); @@ -247,12 +256,12 @@ static ssize_t opal_prd_write(struct file *file, const char __user *buf, static int opal_prd_release(struct inode *inode, struct file *file) { - struct opal_prd_msg_header msg; + struct opal_prd_msg msg; - msg.size = cpu_to_be16(sizeof(msg)); - msg.type = OPAL_PRD_MSG_TYPE_FINI; + msg.header.size = cpu_to_be16(sizeof(msg)); + msg.header.type = OPAL_PRD_MSG_TYPE_FINI; - opal_prd_msg((struct opal_prd_msg *)&msg); + opal_prd_msg(&msg); atomic_xchg(&prd_usage, 0); @@ -352,7 +361,7 @@ static int opal_prd_msg_notifier(struct notifier_block *nb, if (!item) return -ENOMEM; - memcpy(&item->msg, msg->params, msg_size); + memcpy(&item->msg.data, msg->params, msg_size); spin_lock_irqsave(&opal_prd_msg_queue_lock, flags); list_add_tail(&item->list, &opal_prd_msg_queue); @@ -416,12 +425,11 @@ static int opal_prd_probe(struct platform_device *pdev) return 0; } -static int opal_prd_remove(struct platform_device *pdev) +static void opal_prd_remove(struct platform_device *pdev) { misc_deregister(&opal_prd_dev); opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb); opal_message_notifier_unregister(OPAL_MSG_PRD2, &opal_prd_event_nb2); - return 0; } static const struct of_device_id opal_prd_match[] = { @@ -435,7 +443,7 @@ static struct platform_driver opal_prd_driver = { .of_match_table = opal_prd_match, }, .probe = opal_prd_probe, - .remove = opal_prd_remove, + .remove = opal_prd_remove, }; module_platform_driver(opal_prd_driver); diff --git a/arch/powerpc/platforms/powernv/opal-psr.c b/arch/powerpc/platforms/powernv/opal-psr.c index 69d7e75950d1..6441e17b6996 100644 --- a/arch/powerpc/platforms/powernv/opal-psr.c +++ b/arch/powerpc/platforms/powernv/opal-psr.c @@ -135,7 +135,7 @@ void __init opal_psr_init(void) psr_attrs = kcalloc(of_get_child_count(psr), sizeof(*psr_attrs), GFP_KERNEL); if (!psr_attrs) - return; + goto out_put_psr; psr_kobj = kobject_create_and_add("psr", opal_kobj); if (!psr_kobj) { @@ -162,10 +162,14 @@ void __init opal_psr_init(void) } i++; } + of_node_put(psr); return; out_kobj: + of_node_put(node); kobject_put(psr_kobj); out: kfree(psr_attrs); +out_put_psr: + of_node_put(psr); } diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c index a9bcf9217e64..79011a263aa6 100644 --- a/arch/powerpc/platforms/powernv/opal-rtc.c +++ b/arch/powerpc/platforms/powernv/opal-rtc.c 
@@ -11,8 +11,9 @@ #include <linux/bcd.h> #include <linux/rtc.h> #include <linux/delay.h> -#include <linux/platform_device.h> +#include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <asm/opal.h> #include <asm/firmware.h> diff --git a/arch/powerpc/platforms/powernv/opal-secvar.c b/arch/powerpc/platforms/powernv/opal-secvar.c index 14133e120bdd..6ac410f4d3c7 100644 --- a/arch/powerpc/platforms/powernv/opal-secvar.c +++ b/arch/powerpc/platforms/powernv/opal-secvar.c @@ -12,8 +12,8 @@ #define pr_fmt(fmt) "secvar: "fmt #include <linux/types.h> +#include <linux/of.h> #include <linux/platform_device.h> -#include <linux/of_platform.h> #include <asm/opal.h> #include <asm/secvar.h> #include <asm/secure_boot.h> @@ -54,8 +54,7 @@ static int opal_status_to_err(int rc) return err; } -static int opal_get_variable(const char *key, uint64_t ksize, - u8 *data, uint64_t *dsize) +static int opal_get_variable(const char *key, u64 ksize, u8 *data, u64 *dsize) { int rc; @@ -71,8 +70,7 @@ static int opal_get_variable(const char *key, uint64_t ksize, return opal_status_to_err(rc); } -static int opal_get_next_variable(const char *key, uint64_t *keylen, - uint64_t keybufsize) +static int opal_get_next_variable(const char *key, u64 *keylen, u64 keybufsize) { int rc; @@ -88,8 +86,7 @@ static int opal_get_next_variable(const char *key, uint64_t *keylen, return opal_status_to_err(rc); } -static int opal_set_variable(const char *key, uint64_t ksize, u8 *data, - uint64_t dsize) +static int opal_set_variable(const char *key, u64 ksize, u8 *data, u64 dsize) { int rc; @@ -101,10 +98,57 @@ static int opal_set_variable(const char *key, uint64_t ksize, u8 *data, return opal_status_to_err(rc); } +static ssize_t opal_secvar_format(char *buf, size_t bufsize) +{ + ssize_t rc = 0; + struct device_node *node; + const char *format; + + node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend"); + if (!of_device_is_available(node)) { + rc = -ENODEV; + goto out; + } + + rc = of_property_read_string(node, "format", &format); + if (rc) + goto out; + + rc = snprintf(buf, bufsize, "%s", format); + +out: + of_node_put(node); + + return rc; +} + +static int opal_secvar_max_size(u64 *max_size) +{ + int rc; + struct device_node *node; + + node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend"); + if (!node) + return -ENODEV; + + if (!of_device_is_available(node)) { + rc = -ENODEV; + goto out; + } + + rc = of_property_read_u64(node, "max-var-size", max_size); + +out: + of_node_put(node); + return rc; +} + static const struct secvar_operations opal_secvar_ops = { .get = opal_get_variable, .get_next = opal_get_next_variable, .set = opal_set_variable, + .format = opal_secvar_format, + .max_size = opal_secvar_max_size, }; static int opal_secvar_probe(struct platform_device *pdev) @@ -116,9 +160,7 @@ static int opal_secvar_probe(struct platform_device *pdev) return -ENODEV; } - set_secvar_ops(&opal_secvar_ops); - - return 0; + return set_secvar_ops(&opal_secvar_ops); } static const struct of_device_id opal_secvar_match[] = { diff --git a/arch/powerpc/platforms/powernv/opal-sensor-groups.c b/arch/powerpc/platforms/powernv/opal-sensor-groups.c index 8fba7d25ae56..9944376b115c 100644 --- a/arch/powerpc/platforms/powernv/opal-sensor-groups.c +++ b/arch/powerpc/platforms/powernv/opal-sensor-groups.c @@ -170,7 +170,7 @@ void __init opal_sensor_groups_init(void) sgs = kcalloc(of_get_child_count(sg), sizeof(*sgs), GFP_KERNEL); if (!sgs) - return; + goto out_sg_put; sg_kobj = 
kobject_create_and_add("sensor_groups", opal_kobj); if (!sg_kobj) { @@ -222,6 +222,7 @@ void __init opal_sensor_groups_init(void) } i++; } + of_node_put(sg); return; @@ -231,6 +232,9 @@ out_sgs_sgattrs: kfree(sgs[i].sg.attrs); } kobject_put(sg_kobj); + of_node_put(node); out_sgs: kfree(sgs); +out_sg_put: + of_node_put(sg); } diff --git a/arch/powerpc/platforms/powernv/opal-sensor.c b/arch/powerpc/platforms/powernv/opal-sensor.c index 3192c614a1e1..8880a1c14573 100644 --- a/arch/powerpc/platforms/powernv/opal-sensor.c +++ b/arch/powerpc/platforms/powernv/opal-sensor.c @@ -6,7 +6,9 @@ */ #include <linux/delay.h> +#include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <asm/opal.h> #include <asm/machdep.h> diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index e5acc33b3b20..0ed95f753416 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -57,7 +57,7 @@ opal_return: .long 0xa64b7b7d /* mthsrr1 r11 */ .long 0x2402004c /* hrfid */ #endif - ld r2,PACATOC(r13) + LOAD_PACA_TOC() ld r0,PPC_LR_STKOFF(r1) mtlr r0 blr diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 6b4eed2ef4fa..748c2b97fa53 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c @@ -165,10 +165,15 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn, ent->chip = chip; snprintf(ent->name, 16, "%08x", chip); ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn); + if (!ent->path.data) { + kfree(ent); + return -ENOMEM; + } + ent->path.size = strlen((char *)ent->path.data); dir = debugfs_create_dir(ent->name, root); - if (!dir) { + if (IS_ERR(dir)) { kfree(ent->path.data); kfree(ent); return -1; @@ -190,7 +195,7 @@ static int scom_debug_init(void) return 0; root = debugfs_create_dir("scom", arch_debugfs_dir); - if (!root) + if (IS_ERR(root)) return -1; rc = 0; diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 55a8fbfdb5b2..9ec265fcaff4 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -180,10 +180,7 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node, /* * Allocate a buffer to hold the MC recoverable ranges. 
*/ - mc_recoverable_range = memblock_alloc(size, __alignof__(u64)); - if (!mc_recoverable_range) - panic("%s: Failed to allocate %u bytes align=0x%lx\n", - __func__, size, __alignof__(u64)); + mc_recoverable_range = memblock_alloc_or_panic(size, __alignof__(u64)); for (i = 0; i < mc_recoverable_range_len; i++) { mc_recoverable_range[i].start_addr = @@ -424,7 +421,7 @@ static int __init opal_message_init(struct device_node *opal_node) return 0; } -int opal_get_chars(uint32_t vtermno, char *buf, int count) +ssize_t opal_get_chars(uint32_t vtermno, u8 *buf, size_t count) { s64 rc; __be64 evt, len; @@ -441,10 +438,11 @@ int opal_get_chars(uint32_t vtermno, char *buf, int count) return 0; } -static int __opal_put_chars(uint32_t vtermno, const char *data, int total_len, bool atomic) +static ssize_t __opal_put_chars(uint32_t vtermno, const u8 *data, + size_t total_len, bool atomic) { unsigned long flags = 0 /* shut up gcc */; - int written; + ssize_t written; __be64 olen; s64 rc; @@ -484,7 +482,7 @@ static int __opal_put_chars(uint32_t vtermno, const char *data, int total_len, b if (atomic) { /* Should not happen */ pr_warn("atomic console write returned partial " - "len=%d written=%d\n", total_len, written); + "len=%zu written=%zd\n", total_len, written); } if (!written) written = -EAGAIN; @@ -497,7 +495,7 @@ out: return written; } -int opal_put_chars(uint32_t vtermno, const char *data, int total_len) +ssize_t opal_put_chars(uint32_t vtermno, const u8 *data, size_t total_len) { return __opal_put_chars(vtermno, data, total_len, false); } @@ -508,7 +506,8 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len) * true at the moment because console space can race with OPAL's console * writes. */ -int opal_put_chars_atomic(uint32_t vtermno, const char *data, int total_len) +ssize_t opal_put_chars_atomic(uint32_t vtermno, const u8 *data, + size_t total_len) { return __opal_put_chars(vtermno, data, total_len, true); } @@ -790,14 +789,6 @@ static int __init opal_sysfs_init(void) return 0; } -static ssize_t export_attr_read(struct file *fp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, - loff_t off, size_t count) -{ - return memory_read_from_buffer(buf, count, &off, bin_attr->private, - bin_attr->size); -} - static int opal_add_one_export(struct kobject *parent, const char *export_name, struct device_node *np, const char *prop_name) { @@ -824,7 +815,7 @@ static int opal_add_one_export(struct kobject *parent, const char *export_name, sysfs_bin_attr_init(attr); attr->attr.name = name; attr->attr.mode = 0400; - attr->read = export_attr_read; + attr->read_new = sysfs_bin_attr_simple_read; attr->private = __va(vals[0]); attr->size = vals[1]; @@ -892,6 +883,7 @@ static void opal_export_attrs(void) kobj = kobject_create_and_add("exports", opal_kobj); if (!kobj) { pr_warn("kobject_create_and_add() of exports failed\n"); + of_node_put(np); return; } @@ -952,6 +944,8 @@ static void __init opal_imc_init_dev(void) np = of_find_compatible_node(NULL, NULL, IMC_DTB_COMPAT); if (np) of_platform_device_create(np, NULL, NULL); + + of_node_put(np); } static int kopald(void *unused) diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c deleted file mode 100644 index 7e419de71db8..000000000000 --- a/arch/powerpc/platforms/powernv/pci-cxl.c +++ /dev/null @@ -1,153 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014-2016 IBM Corp. 
- */ - -#include <linux/module.h> -#include <misc/cxl-base.h> -#include <asm/pnv-pci.h> -#include <asm/opal.h> - -#include "pci.h" - -int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - struct pnv_ioda_pe *pe; - int rc; - - pe = pnv_ioda_get_pe(dev); - if (!pe) - return -ENODEV; - - pe_info(pe, "Switching PHB to CXL\n"); - - rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number); - if (rc == OPAL_UNSUPPORTED) - dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n"); - else if (rc) - dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc); - - return rc; -} -EXPORT_SYMBOL(pnv_phb_to_cxl_mode); - -/* Find PHB for cxl dev and allocate MSI hwirqs? - * Returns the absolute hardware IRQ number - */ -int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num); - - if (hwirq < 0) { - dev_warn(&dev->dev, "Failed to find a free MSI\n"); - return -ENOSPC; - } - - return phb->msi_base + hwirq; -} -EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs); - -void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - - msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num); -} -EXPORT_SYMBOL(pnv_cxl_release_hwirqs); - -void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs, - struct pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - int i, hwirq; - - for (i = 1; i < CXL_IRQ_RANGES; i++) { - if (!irqs->range[i]) - continue; - pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n", - i, irqs->offset[i], - irqs->range[i]); - hwirq = irqs->offset[i] - phb->msi_base; - msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, - irqs->range[i]); - } -} -EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges); - -int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs, - struct pci_dev *dev, int num) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - int i, hwirq, try; - - memset(irqs, 0, sizeof(struct cxl_irq_ranges)); - - /* 0 is reserved for the multiplexed PSL DSI interrupt */ - for (i = 1; i < CXL_IRQ_RANGES && num; i++) { - try = num; - while (try) { - hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try); - if (hwirq >= 0) - break; - try /= 2; - } - if (!try) - goto fail; - - irqs->offset[i] = phb->msi_base + hwirq; - irqs->range[i] = try; - pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n", - i, irqs->offset[i], irqs->range[i]); - num -= try; - } - if (num) - goto fail; - - return 0; -fail: - pnv_cxl_release_hwirq_ranges(irqs, dev); - return -ENOSPC; -} -EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges); - -int pnv_cxl_get_irq_count(struct pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - - return phb->msi_bmp.irq_count; -} -EXPORT_SYMBOL(pnv_cxl_get_irq_count); - -int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq, - unsigned int virq) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - unsigned int xive_num = hwirq - phb->msi_base; - struct pnv_ioda_pe *pe; - int rc; - - if (!(pe = pnv_ioda_get_pe(dev))) - 
return -ENODEV; - - /* Assign XIVE to PE */ - rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); - if (rc) { - pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x " - "hwirq 0x%x XIVE 0x%x PE\n", - pci_name(dev), rc, phb->msi_base, hwirq, xive_num); - return -EIO; - } - pnv_set_msi_irq_chip(phb, virq); - - return 0; -} -EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index c8cf2728031a..d8ccf2c9b98a 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -39,17 +39,12 @@ #include <asm/mmzone.h> #include <asm/xive.h> -#include <misc/cxl-base.h> - #include "powernv.h" #include "pci.h" #include "../../../../drivers/pci/pci.h" -#define PNV_IODA1_M64_NUM 16 /* Number of M64 BARs */ -#define PNV_IODA1_M64_SEGS 8 /* Segments per M64 BAR */ -#define PNV_IODA1_DMA32_SEGSIZE 0x10000000 - -static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_OCAPI" }; +/* This array is indexed with enum pnv_phb_type */ +static const char * const pnv_phb_names[] = { "IODA2", "NPU_OCAPI" }; static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable); static void pnv_pci_configure_bus(struct pci_bus *bus); @@ -67,7 +62,7 @@ void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, vaf.va = &args; if (pe->flags & PNV_IODA_PE_DEV) - strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); + strscpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) sprintf(pfix, "%04x:%02x ", pci_domain_nr(pe->pbus), pe->pbus->number); @@ -280,86 +275,6 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev, } } -static int pnv_ioda1_init_m64(struct pnv_phb *phb) -{ - struct resource *r; - int index; - - /* - * There are 16 M64 BARs, each of which has 8 segments. So - * there are as many M64 segments as the maximum number of - * PEs, which is 128. - */ - for (index = 0; index < PNV_IODA1_M64_NUM; index++) { - unsigned long base, segsz = phb->ioda.m64_segsize; - int64_t rc; - - base = phb->ioda.m64_base + - index * PNV_IODA1_M64_SEGS * segsz; - rc = opal_pci_set_phb_mem_window(phb->opal_id, - OPAL_M64_WINDOW_TYPE, index, base, 0, - PNV_IODA1_M64_SEGS * segsz); - if (rc != OPAL_SUCCESS) { - pr_warn(" Error %lld setting M64 PHB#%x-BAR#%d\n", - rc, phb->hose->global_number, index); - goto fail; - } - - rc = opal_pci_phb_mmio_enable(phb->opal_id, - OPAL_M64_WINDOW_TYPE, index, - OPAL_ENABLE_M64_SPLIT); - if (rc != OPAL_SUCCESS) { - pr_warn(" Error %lld enabling M64 PHB#%x-BAR#%d\n", - rc, phb->hose->global_number, index); - goto fail; - } - } - - for (index = 0; index < phb->ioda.total_pe_num; index++) { - int64_t rc; - - /* - * P7IOC supports M64DT, which helps mapping M64 segment - * to one particular PE#. However, PHB3 has fixed mapping - * between M64 segment and PE#. In order to have same logic - * for P7IOC and PHB3, we enforce fixed mapping between M64 - * segment and PE# on P7IOC. - */ - rc = opal_pci_map_pe_mmio_window(phb->opal_id, - index, OPAL_M64_WINDOW_TYPE, - index / PNV_IODA1_M64_SEGS, - index % PNV_IODA1_M64_SEGS); - if (rc != OPAL_SUCCESS) { - pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n", - __func__, rc, phb->hose->global_number, - index); - goto fail; - } - } - - /* - * Exclude the segments for reserved and root bus PE, which - * are first or last two PEs. 
- */ - r = &phb->hose->mem_resources[1]; - if (phb->ioda.reserved_pe_idx == 0) - r->start += (2 * phb->ioda.m64_segsize); - else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) - r->end -= (2 * phb->ioda.m64_segsize); - else - WARN(1, "Wrong reserved PE#%x on PHB#%x\n", - phb->ioda.reserved_pe_idx, phb->hose->global_number); - - return 0; - -fail: - for ( ; index >= 0; index--) - opal_pci_phb_mmio_enable(phb->opal_id, - OPAL_M64_WINDOW_TYPE, index, OPAL_DISABLE_M64); - - return -EIO; -} - static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus, unsigned long *pe_bitmap, bool all) @@ -443,7 +358,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb) const __be32 *r; u64 pci_addr; - if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) { + if (phb->type != PNV_PHB_IODA2) { pr_info(" Not support M64 window\n"); return; } @@ -518,10 +433,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb) * Setup init functions for M64 based on IODA version, IODA3 uses * the IODA2 code. */ - if (phb->type == PNV_PHB_IODA1) - phb->init_m64 = pnv_ioda1_init_m64; - else - phb->init_m64 = pnv_ioda2_init_m64; + phb->init_m64 = pnv_ioda2_init_m64; } static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no) @@ -952,29 +864,8 @@ int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) for (rid = pe->rid; rid < rid_end; rid++) phb->ioda.pe_rmap[rid] = pe->pe_number; - /* Setup one MVTs on IODA1 */ - if (phb->type != PNV_PHB_IODA1) { - pe->mve_number = 0; - goto out; - } - - pe->mve_number = pe->pe_number; - rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number); - if (rc != OPAL_SUCCESS) { - pe_err(pe, "OPAL error %ld setting up MVE %x\n", - rc, pe->mve_number); - pe->mve_number = -1; - } else { - rc = opal_pci_set_mve_enable(phb->opal_id, - pe->mve_number, OPAL_ENABLE_MVE); - if (rc) { - pe_err(pe, "OPAL error %ld enabling MVE %x\n", - rc, pe->mve_number); - pe->mve_number = -1; - } - } + pe->mve_number = 0; -out: return 0; } @@ -1097,9 +988,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) return pe; } -static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb, - struct pnv_ioda_pe *pe); - static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev) { struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); @@ -1107,14 +995,14 @@ static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev) struct pnv_ioda_pe *pe; /* Check if the BDFN for this device is associated with a PE yet */ - pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8)); + pe = pnv_pci_bdfn_to_pe(phb, pci_dev_id(pdev)); if (!pe) { /* VF PEs should be pre-configured in pnv_pci_sriov_enable() */ if (WARN_ON(pdev->is_virtfn)) return; pnv_pci_configure_bus(pdev->bus); - pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8)); + pe = pnv_pci_bdfn_to_pe(phb, pci_dev_id(pdev)); pci_info(pdev, "Configured PE#%x\n", pe ? 
pe->pe_number : 0xfffff); @@ -1134,9 +1022,6 @@ static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev) */ if (!pe->dma_setup_done && !pci_is_bridge(pdev)) { switch (phb->type) { - case PNV_PHB_IODA1: - pnv_pci_ioda1_setup_dma_pe(phb, pe); - break; case PNV_PHB_IODA2: pnv_pci_ioda2_setup_dma_pe(phb, pe); break; @@ -1273,53 +1158,6 @@ static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb) return phb->regs + 0x210; } -static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl, - unsigned long index, unsigned long npages) -{ - struct iommu_table_group_link *tgl = list_first_entry_or_null( - &tbl->it_group_list, struct iommu_table_group_link, - next); - struct pnv_ioda_pe *pe = container_of(tgl->table_group, - struct pnv_ioda_pe, table_group); - __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); - unsigned long start, end, inc; - - start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset); - end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset + - npages - 1); - - /* p7ioc-style invalidation, 2 TCEs per write */ - start |= (1ull << 63); - end |= (1ull << 63); - inc = 16; - end |= inc - 1; /* round up end to be different than start */ - - mb(); /* Ensure above stores are visible */ - while (start <= end) { - __raw_writeq_be(start, invalidate); - start += inc; - } - - /* - * The iommu layer will do another mb() for us on build() - * and we don't care on free() - */ -} - -static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, - long npages, unsigned long uaddr, - enum dma_data_direction direction, - unsigned long attrs) -{ - int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, - attrs); - - if (!ret) - pnv_pci_p7ioc_tce_invalidate(tbl, index, npages); - - return ret; -} - #ifdef CONFIG_IOMMU_API /* Common for IODA1 and IODA2 */ static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index, @@ -1329,25 +1167,6 @@ static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index, } #endif -static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index, - long npages) -{ - pnv_tce_free(tbl, index, npages); - - pnv_pci_p7ioc_tce_invalidate(tbl, index, npages); -} - -static struct iommu_table_ops pnv_ioda1_iommu_ops = { - .set = pnv_ioda1_tce_build, -#ifdef CONFIG_IOMMU_API - .xchg_no_kill = pnv_ioda_tce_xchg_no_kill, - .tce_kill = pnv_pci_p7ioc_tce_invalidate, - .useraddrptr = pnv_tce_useraddrptr, -#endif - .clear = pnv_ioda1_tce_free, - .get = pnv_tce_get, -}; - #define PHB3_TCE_KILL_INVAL_ALL PPC_BIT(0) #define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1) #define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2) @@ -1453,177 +1272,6 @@ static struct iommu_table_ops pnv_ioda2_iommu_ops = { .free = pnv_pci_ioda2_table_free_pages, }; -static int pnv_pci_ioda_dev_dma_weight(struct pci_dev *dev, void *data) -{ - unsigned int *weight = (unsigned int *)data; - - /* This is quite simplistic. The "base" weight of a device - * is 10. 0 means no DMA is to be accounted for it. 
- */ - if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) - return 0; - - if (dev->class == PCI_CLASS_SERIAL_USB_UHCI || - dev->class == PCI_CLASS_SERIAL_USB_OHCI || - dev->class == PCI_CLASS_SERIAL_USB_EHCI) - *weight += 3; - else if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID) - *weight += 15; - else - *weight += 10; - - return 0; -} - -static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe) -{ - unsigned int weight = 0; - - /* SRIOV VF has same DMA32 weight as its PF */ -#ifdef CONFIG_PCI_IOV - if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) { - pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight); - return weight; - } -#endif - - if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) { - pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight); - } else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) { - struct pci_dev *pdev; - - list_for_each_entry(pdev, &pe->pbus->devices, bus_list) - pnv_pci_ioda_dev_dma_weight(pdev, &weight); - } else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) { - pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight); - } - - return weight; -} - -static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb, - struct pnv_ioda_pe *pe) -{ - - struct page *tce_mem = NULL; - struct iommu_table *tbl; - unsigned int weight, total_weight = 0; - unsigned int tce32_segsz, base, segs, avail, i; - int64_t rc; - void *addr; - - /* XXX FIXME: Handle 64-bit only DMA devices */ - /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */ - /* XXX FIXME: Allocate multi-level tables on PHB3 */ - weight = pnv_pci_ioda_pe_dma_weight(pe); - if (!weight) - return; - - pci_walk_bus(phb->hose->bus, pnv_pci_ioda_dev_dma_weight, - &total_weight); - segs = (weight * phb->ioda.dma32_count) / total_weight; - if (!segs) - segs = 1; - - /* - * Allocate contiguous DMA32 segments. We begin with the expected - * number of segments. With one more attempt, the number of DMA32 - * segments to be allocated is decreased by one until one segment - * is allocated successfully. - */ - do { - for (base = 0; base <= phb->ioda.dma32_count - segs; base++) { - for (avail = 0, i = base; i < base + segs; i++) { - if (phb->ioda.dma32_segmap[i] == - IODA_INVALID_PE) - avail++; - } - - if (avail == segs) - goto found; - } - } while (--segs); - - if (!segs) { - pe_warn(pe, "No available DMA32 segments\n"); - return; - } - -found: - tbl = pnv_pci_table_alloc(phb->hose->node); - if (WARN_ON(!tbl)) - return; - - iommu_register_group(&pe->table_group, phb->hose->global_number, - pe->pe_number); - pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group); - - /* Grab a 32-bit TCE table */ - pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n", - weight, total_weight, base, segs); - pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n", - base * PNV_IODA1_DMA32_SEGSIZE, - (base + segs) * PNV_IODA1_DMA32_SEGSIZE - 1); - - /* XXX Currently, we allocate one big contiguous table for the - * TCEs. 
We only really need one chunk per 256M of TCE space - * (ie per segment) but that's an optimization for later, it - * requires some added smarts with our get/put_tce implementation - * - * Each TCE page is 4KB in size and each TCE entry occupies 8 - * bytes - */ - tce32_segsz = PNV_IODA1_DMA32_SEGSIZE >> (IOMMU_PAGE_SHIFT_4K - 3); - tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL, - get_order(tce32_segsz * segs)); - if (!tce_mem) { - pe_err(pe, " Failed to allocate a 32-bit TCE memory\n"); - goto fail; - } - addr = page_address(tce_mem); - memset(addr, 0, tce32_segsz * segs); - - /* Configure HW */ - for (i = 0; i < segs; i++) { - rc = opal_pci_map_pe_dma_window(phb->opal_id, - pe->pe_number, - base + i, 1, - __pa(addr) + tce32_segsz * i, - tce32_segsz, IOMMU_PAGE_SIZE_4K); - if (rc) { - pe_err(pe, " Failed to configure 32-bit TCE table, err %lld\n", - rc); - goto fail; - } - } - - /* Setup DMA32 segment mapping */ - for (i = base; i < base + segs; i++) - phb->ioda.dma32_segmap[i] = pe->pe_number; - - /* Setup linux iommu table */ - pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs, - base * PNV_IODA1_DMA32_SEGSIZE, - IOMMU_PAGE_SHIFT_4K); - - tbl->it_ops = &pnv_ioda1_iommu_ops; - pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift; - pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift; - if (!iommu_init_table(tbl, phb->hose->node, 0, 0)) - panic("Failed to initialize iommu table"); - - pe->dma_setup_done = true; - return; - fail: - /* XXX Failure: Try to fallback to 64-bit only ? */ - if (tce_mem) - __free_pages(tce_mem, get_order(tce32_segsz * segs)); - if (tbl) { - pnv_pci_unlink_table_and_group(tbl, &pe->table_group); - iommu_tce_table_put(tbl); - } -} - static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group, int num, struct iommu_table *tbl) { @@ -1739,7 +1387,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) * DMA window can be larger than available memory, which will * cause errors later. */ - const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_ORDER - 1); + const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_PAGE_ORDER); /* * We create the default window as big as we can. The constraint is @@ -1779,6 +1427,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) res_end = min(window_size, SZ_4G) >> tbl->it_page_shift; } + tbl->it_index = (pe->phb->hose->global_number << 16) | pe->pe_number; if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end)) rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl); else @@ -1886,13 +1535,21 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) } } -static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group) +static long pnv_ioda2_take_ownership(struct iommu_table_group *table_group, + struct device *dev __maybe_unused) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); /* Store @tbl as pnv_pci_ioda2_unset_window() resets it */ struct iommu_table *tbl = pe->table_group.tables[0]; + /* + * iommu_ops transfers the ownership per a device and we mode + * the group ownership with the first device in the group. 
+ */ + if (!tbl) + return 0; + pnv_pci_ioda2_set_bypass(pe, false); pnv_pci_ioda2_unset_window(&pe->table_group, 0); if (pe->pbus) @@ -1900,13 +1557,19 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group) else if (pe->pdev) set_iommu_table_base(&pe->pdev->dev, NULL); iommu_tce_table_put(tbl); + + return 0; } -static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group) +static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group, + struct device *dev __maybe_unused) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); + /* See the comment about iommu_ops above */ + if (pe->table_group.tables[0]) + return; pnv_pci_ioda2_setup_default_config(pe); if (pe->pbus) pnv_ioda_setup_bus_dma(pe, pe->pbus); @@ -1971,47 +1634,6 @@ int64_t pnv_opal_pci_msi_eoi(struct irq_data *d) return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq); } -/* - * The IRQ data is mapped in the XICS domain, with OPAL HW IRQ numbers - */ -static void pnv_ioda2_msi_eoi(struct irq_data *d) -{ - int64_t rc; - unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); - struct pci_controller *hose = irq_data_get_irq_chip_data(d); - struct pnv_phb *phb = hose->private_data; - - rc = opal_pci_msi_eoi(phb->opal_id, hw_irq); - WARN_ON_ONCE(rc); - - icp_native_eoi(d); -} - -/* P8/CXL only */ -void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq) -{ - struct irq_data *idata; - struct irq_chip *ichip; - - /* The MSI EOI OPAL call is only needed on PHB3 */ - if (phb->model != PNV_PHB_MODEL_PHB3) - return; - - if (!phb->ioda.irq_chip_init) { - /* - * First time we setup an MSI IRQ, we need to setup the - * corresponding IRQ chip to route correctly. - */ - idata = irq_get_irq_data(virq); - ichip = irq_data_get_irq_chip(idata); - phb->ioda.irq_chip_init = 1; - phb->ioda.irq_chip = *ichip; - phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi; - } - irq_set_chip(virq, &phb->ioda.irq_chip); - irq_set_chip_data(virq, phb->hose); -} - static struct irq_chip pnv_pci_msi_irq_chip; /* @@ -2259,7 +1881,7 @@ static const struct irq_domain_ops pnv_irq_domain_ops = { static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned int count) { struct pnv_phb *phb = hose->private_data; - struct irq_domain *parent = irq_get_default_host(); + struct irq_domain *parent = irq_get_default_domain(); hose->fwnode = irq_domain_alloc_named_id_fwnode("PNV-MSI", phb->opal_id); if (!hose->fwnode) @@ -2275,7 +1897,7 @@ static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned return -ENOMEM; } - hose->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(hose->dn), + hose->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(hose->dn), &pnv_msi_domain_info, hose->dev_domain); if (!hose->msi_domain) { @@ -2323,7 +1945,8 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, int index; int64_t rc; - if (!res || !res->flags || res->start > res->end) + if (!res || !res->flags || res->start > res->end || + res->flags & IORESOURCE_UNSET) return; if (res->flags & IORESOURCE_IO) { @@ -2688,57 +2311,6 @@ static bool pnv_ocapi_enable_device_hook(struct pci_dev *dev) return true; } -static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group, - int num) -{ - struct pnv_ioda_pe *pe = container_of(table_group, - struct pnv_ioda_pe, table_group); - struct pnv_phb *phb = pe->phb; - unsigned int idx; - long rc; - - pe_info(pe, "Removing DMA window #%d\n", num); - for (idx = 0; idx < phb->ioda.dma32_count; 
idx++) { - if (phb->ioda.dma32_segmap[idx] != pe->pe_number) - continue; - - rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, - idx, 0, 0ul, 0ul, 0ul); - if (rc != OPAL_SUCCESS) { - pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n", - rc, idx); - return rc; - } - - phb->ioda.dma32_segmap[idx] = IODA_INVALID_PE; - } - - pnv_pci_unlink_table_and_group(table_group->tables[num], table_group); - return OPAL_SUCCESS; -} - -static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe) -{ - struct iommu_table *tbl = pe->table_group.tables[0]; - int64_t rc; - - if (!pe->dma_setup_done) - return; - - rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0); - if (rc != OPAL_SUCCESS) - return; - - pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size); - if (pe->table_group.group) { - iommu_group_put(pe->table_group.group); - WARN_ON(pe->table_group.group); - } - - free_pages(tbl->it_base, get_order(tbl->it_size << 3)); - iommu_tce_table_put(tbl); -} - void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) { struct iommu_table *tbl = pe->table_group.tables[0]; @@ -2787,13 +2359,7 @@ static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe) { struct pnv_phb *phb = pe->phb; - if (phb->type == PNV_PHB_IODA1) { - pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE, - phb->ioda.io_segmap); - pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE, - phb->ioda.m32_segmap); - /* M64 is pre-configured by pnv_ioda1_init_m64() */ - } else if (phb->type == PNV_PHB_IODA2) { + if (phb->type == PNV_PHB_IODA2) { pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE, phb->ioda.m32_segmap); } @@ -2811,9 +2377,6 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe) mutex_unlock(&phb->ioda.pe_list_mutex); switch (phb->type) { - case PNV_PHB_IODA1: - pnv_pci_ioda1_release_pe_dma(pe); - break; case PNV_PHB_IODA2: pnv_pci_ioda2_release_pe_dma(pe); break; @@ -2912,6 +2475,27 @@ static void pnv_pci_ioda_dma_bus_setup(struct pci_bus *bus) } } +#ifdef CONFIG_IOMMU_API +static struct iommu_group *pnv_pci_device_group(struct pci_controller *hose, + struct pci_dev *pdev) +{ + struct pnv_phb *phb = hose->private_data; + struct pnv_ioda_pe *pe; + + if (WARN_ON(!phb)) + return ERR_PTR(-ENODEV); + + pe = pnv_pci_bdfn_to_pe(phb, pci_dev_id(pdev)); + if (!pe) + return ERR_PTR(-ENODEV); + + if (!pe->table_group.group) + return ERR_PTR(-ENODEV); + + return iommu_group_ref_get(pe->table_group.group); +} +#endif + static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { .dma_dev_setup = pnv_pci_ioda_dma_dev_setup, .dma_bus_setup = pnv_pci_ioda_dma_bus_setup, @@ -2922,6 +2506,9 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { .setup_bridge = pnv_pci_fixup_bridge_resources, .reset_secondary_bus = pnv_pci_reset_secondary_bus, .shutdown = pnv_pci_ioda_shutdown, +#ifdef CONFIG_IOMMU_API + .device_group = pnv_pci_device_group, +#endif }; static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = { @@ -2938,7 +2525,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, struct pci_controller *hose; struct pnv_phb *phb; unsigned long size, m64map_off, m32map_off, pemap_off; - unsigned long iomap_off = 0, dma32map_off = 0; struct pnv_ioda_pe *root_pe; struct resource r; const __be64 *prop64; @@ -3049,10 +2635,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num; phb->ioda.io_pci_base = 0; /* XXX calculate this ? 
*/ - /* Calculate how many 32-bit TCE segments we have */ - phb->ioda.dma32_count = phb->ioda.m32_pci_base / - PNV_IODA1_DMA32_SEGSIZE; - /* Allocate aux data & arrays. We don't have IO ports on PHB3 */ size = ALIGN(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8, sizeof(unsigned long)); @@ -3060,13 +2642,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]); m32map_off = size; size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]); - if (phb->type == PNV_PHB_IODA1) { - iomap_off = size; - size += phb->ioda.total_pe_num * sizeof(phb->ioda.io_segmap[0]); - dma32map_off = size; - size += phb->ioda.dma32_count * - sizeof(phb->ioda.dma32_segmap[0]); - } pemap_off = size; size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe); aux = kzalloc(size, GFP_KERNEL); @@ -3080,15 +2655,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, phb->ioda.m64_segmap[segno] = IODA_INVALID_PE; phb->ioda.m32_segmap[segno] = IODA_INVALID_PE; } - if (phb->type == PNV_PHB_IODA1) { - phb->ioda.io_segmap = aux + iomap_off; - for (segno = 0; segno < phb->ioda.total_pe_num; segno++) - phb->ioda.io_segmap[segno] = IODA_INVALID_PE; - - phb->ioda.dma32_segmap = aux + dma32map_off; - for (segno = 0; segno < phb->ioda.dma32_count; segno++) - phb->ioda.dma32_segmap[segno] = IODA_INVALID_PE; - } phb->ioda.pe_array = aux + pemap_off; /* @@ -3112,10 +2678,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, INIT_LIST_HEAD(&phb->ioda.pe_list); mutex_init(&phb->ioda.pe_list_mutex); - /* Calculate how many 32-bit TCE segments we have */ - phb->ioda.dma32_count = phb->ioda.m32_pci_base / - PNV_IODA1_DMA32_SEGSIZE; - #if 0 /* We should really do that ... */ rc = opal_pci_set_phb_mem_window(opal->phb_id, window_type, @@ -3222,27 +2784,3 @@ static void pnv_npu2_opencapi_cfg_size_fixup(struct pci_dev *dev) dev->cfg_size = PCI_CFG_SPACE_EXP_SIZE; } DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pnv_npu2_opencapi_cfg_size_fixup); - -void __init pnv_pci_init_ioda_hub(struct device_node *np) -{ - struct device_node *phbn; - const __be64 *prop64; - u64 hub_id; - - pr_info("Probing IODA IO-Hub %pOF\n", np); - - prop64 = of_get_property(np, "ibm,opal-hubid", NULL); - if (!prop64) { - pr_err(" Missing \"ibm,opal-hubid\" property !\n"); - return; - } - hub_id = be64_to_cpup(prop64); - pr_devel(" HUB-ID : 0x%016llx\n", hub_id); - - /* Count child PHBs */ - for_each_child_of_node(np, phbn) { - /* Look for IODA1 PHBs */ - if (of_device_is_compatible(phbn, "ibm,ioda-phb")) - pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1); - } -} diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 7195133b26bb..cc7b1dd54ac6 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -238,7 +238,7 @@ void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev) } else if (pdev->is_physfn) { /* * For PFs adjust their allocated IOV resources to match what - * the PHB can support using it's M64 BAR table. + * the PHB can support using its M64 BAR table. 
*/ pnv_pci_ioda_fixup_iov_resources(pdev); } @@ -594,12 +594,12 @@ static void pnv_pci_sriov_disable(struct pci_dev *pdev) struct pnv_iov_data *iov; iov = pnv_iov_get(pdev); - num_vfs = iov->num_vfs; - base_pe = iov->vf_pe_arr[0].pe_number; - if (WARN_ON(!iov)) return; + num_vfs = iov->num_vfs; + base_pe = iov->vf_pe_arr[0].pe_number; + /* Release VF PEs */ pnv_ioda_release_vf_PE(pdev); @@ -658,7 +658,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) list_add_tail(&pe->list, &phb->ioda.pe_list); mutex_unlock(&phb->ioda.pe_list_mutex); - /* associate this pe to it's pdn */ + /* associate this pe to its pdn */ list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) { if (vf_pdn->busno == vf_bus && vf_pdn->devfn == vf_devfn) { diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 233a50e65fce..b2c1da025410 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -14,7 +14,6 @@ #include <linux/io.h> #include <linux/msi.h> #include <linux/iommu.h> -#include <linux/sched/mm.h> #include <asm/sections.h> #include <asm/io.h> @@ -33,8 +32,6 @@ #include "powernv.h" #include "pci.h" -static DEFINE_MUTEX(tunnel_mutex); - int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id) { struct device_node *node = np; @@ -744,64 +741,6 @@ struct iommu_table *pnv_pci_table_alloc(int nid) return tbl; } -struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - - return of_node_get(hose->dn); -} -EXPORT_SYMBOL(pnv_pci_get_phb_node); - -int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable) -{ - struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); - u64 tunnel_bar; - __be64 val; - int rc; - - if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR)) - return -ENXIO; - if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR)) - return -ENXIO; - - mutex_lock(&tunnel_mutex); - rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val); - if (rc != OPAL_SUCCESS) { - rc = -EIO; - goto out; - } - tunnel_bar = be64_to_cpu(val); - if (enable) { - /* - * Only one device per PHB can use atomics. - * Our policy is first-come, first-served. - */ - if (tunnel_bar) { - if (tunnel_bar != addr) - rc = -EBUSY; - else - rc = 0; /* Setting same address twice is ok */ - goto out; - } - } else { - /* - * The device that owns atomics and wants to release - * them must pass the same address with enable == 0. - */ - if (tunnel_bar != addr) { - rc = -EPERM; - goto out; - } - addr = 0x0ULL; - } - rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr); - rc = opal_error_code(rc); -out: - mutex_unlock(&tunnel_mutex); - return rc; -} -EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar); - void pnv_pci_shutdown(void) { struct pci_controller *hose; @@ -845,11 +784,6 @@ void __init pnv_pci_init(void) pcie_ports_disabled = true; #endif - /* Look for IODA IO-Hubs. 
*/ - for_each_compatible_node(np, NULL, "ibm,ioda-hub") { - pnv_pci_init_ioda_hub(np); - } - /* Look for ioda2 built-in PHB3's */ for_each_compatible_node(np, NULL, "ibm,ioda2-phb") pnv_pci_init_ioda2_phb(np); @@ -865,28 +799,3 @@ void __init pnv_pci_init(void) /* Configure IOMMU DMA hooks */ set_pci_dma_ops(&dma_iommu_ops); } - -static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - - switch (action) { - case BUS_NOTIFY_DEL_DEVICE: - iommu_del_device(dev); - return 0; - default: - return 0; - } -} - -static struct notifier_block pnv_tce_iommu_bus_nb = { - .notifier_call = pnv_tce_iommu_bus_notifier, -}; - -static int __init pnv_tce_iommu_bus_notifier_init(void) -{ - bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb); - return 0; -} -machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init); diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index f12643958b8d..42075501663b 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -10,7 +10,6 @@ struct pci_dn; enum pnv_phb_type { - PNV_PHB_IODA1, PNV_PHB_IODA2, PNV_PHB_NPU_OCAPI, }; @@ -163,12 +162,7 @@ struct pnv_phb { unsigned int *m32_segmap; unsigned int *io_segmap; - /* DMA32 segment maps - IODA1 only */ - unsigned int dma32_count; - unsigned int *dma32_segmap; - /* IRQ chip */ - int irq_chip_init; struct irq_chip irq_chip; /* Sorted list of used PE's based @@ -279,7 +273,6 @@ int pnv_pci_cfg_write(struct pci_dn *pdn, int where, int size, u32 val); extern struct iommu_table *pnv_pci_table_alloc(int nid); -extern void pnv_pci_init_ioda_hub(struct device_node *np); extern void pnv_pci_init_ioda2_phb(struct device_node *np); extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np); extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev); @@ -287,7 +280,6 @@ extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option); extern struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn); extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev); -extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq); extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift, __u64 window_size, __u32 levels); extern int pnv_eeh_post_init(void); diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c index 3805ad13b8f3..196aa70fe043 100644 --- a/arch/powerpc/platforms/powernv/rng.c +++ b/arch/powerpc/platforms/powernv/rng.c @@ -21,24 +21,15 @@ #define DARN_ERR 0xFFFFFFFFFFFFFFFFul -struct powernv_rng { +struct pnv_rng { void __iomem *regs; void __iomem *regs_real; unsigned long mask; }; -static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng); +static DEFINE_PER_CPU(struct pnv_rng *, pnv_rng); -int powernv_hwrng_present(void) -{ - struct powernv_rng *rng; - - rng = get_cpu_var(powernv_rng); - put_cpu_var(rng); - return rng != NULL; -} - -static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val) +static unsigned long rng_whiten(struct pnv_rng *rng, unsigned long val) { unsigned long parity; @@ -58,18 +49,7 @@ static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val) return val; } -int powernv_get_random_real_mode(unsigned long *v) -{ - struct powernv_rng *rng; - - rng = raw_cpu_read(powernv_rng); - - *v = rng_whiten(rng, __raw_rm_readq(rng->regs_real)); - - return 1; -} - -static int powernv_get_random_darn(unsigned long *v) +static int 
pnv_get_random_darn(unsigned long *v) { unsigned long val; @@ -93,29 +73,31 @@ static int __init initialise_darn(void) return -ENODEV; for (i = 0; i < 10; i++) { - if (powernv_get_random_darn(&val)) { - ppc_md.get_random_seed = powernv_get_random_darn; + if (pnv_get_random_darn(&val)) { + ppc_md.get_random_seed = pnv_get_random_darn; return 0; } } return -EIO; } -int powernv_get_random_long(unsigned long *v) +int pnv_get_random_long(unsigned long *v) { - struct powernv_rng *rng; - - rng = get_cpu_var(powernv_rng); - - *v = rng_whiten(rng, in_be64(rng->regs)); - - put_cpu_var(rng); - + struct pnv_rng *rng; + + if (mfmsr() & MSR_DR) { + rng = get_cpu_var(pnv_rng); + *v = rng_whiten(rng, in_be64(rng->regs)); + put_cpu_var(rng); + } else { + rng = raw_cpu_read(pnv_rng); + *v = rng_whiten(rng, __raw_rm_readq(rng->regs_real)); + } return 1; } -EXPORT_SYMBOL_GPL(powernv_get_random_long); +EXPORT_SYMBOL_GPL(pnv_get_random_long); -static __init void rng_init_per_cpu(struct powernv_rng *rng, +static __init void rng_init_per_cpu(struct pnv_rng *rng, struct device_node *dn) { int chip_id, cpu; @@ -125,16 +107,16 @@ static __init void rng_init_per_cpu(struct powernv_rng *rng, pr_warn("No ibm,chip-id found for %pOF.\n", dn); for_each_possible_cpu(cpu) { - if (per_cpu(powernv_rng, cpu) == NULL || + if (per_cpu(pnv_rng, cpu) == NULL || cpu_to_chip_id(cpu) == chip_id) { - per_cpu(powernv_rng, cpu) = rng; + per_cpu(pnv_rng, cpu) = rng; } } } static __init int rng_create(struct device_node *dn) { - struct powernv_rng *rng; + struct pnv_rng *rng; struct resource res; unsigned long val; @@ -160,7 +142,7 @@ static __init int rng_create(struct device_node *dn) rng_init_per_cpu(rng, dn); - ppc_md.get_random_seed = powernv_get_random_long; + ppc_md.get_random_seed = pnv_get_random_long; return 0; } @@ -208,7 +190,7 @@ static int __init pnv_rng_late_init(void) if (ppc_md.get_random_seed == pnv_get_random_long_early) pnv_get_random_long_early(&v); - if (ppc_md.get_random_seed == powernv_get_random_long) { + if (ppc_md.get_random_seed == pnv_get_random_long) { for_each_compatible_node(dn, NULL, "ibm,power-rng") of_platform_device_create(dn, NULL, NULL); } diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index dac545aa0308..4dbb47ddbdcc 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -17,6 +17,7 @@ #include <linux/console.h> #include <linux/delay.h> #include <linux/irq.h> +#include <linux/seq_buf.h> #include <linux/seq_file.h> #include <linux/of.h> #include <linux/of_fdt.h> @@ -207,8 +208,29 @@ static void __init pnv_setup_arch(void) pnv_rng_init(); } +static void __init pnv_add_hw_description(void) +{ + struct device_node *dn; + const char *s; + + dn = of_find_node_by_path("/ibm,opal/firmware"); + if (!dn) + return; + + if (of_property_read_string(dn, "version", &s) == 0 || + of_property_read_string(dn, "git-id", &s) == 0) + seq_buf_printf(&ppc_hw_desc, "opal:%s ", s); + + if (of_property_read_string(dn, "mi-version", &s) == 0) + seq_buf_printf(&ppc_hw_desc, "mi:%s ", s); + + of_node_put(dn); +} + static void __init pnv_init(void) { + pnv_add_hw_description(); + /* * Initialize the LPC bus now so that legacy serial * ports can be found on it @@ -460,15 +482,7 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) #ifdef CONFIG_MEMORY_HOTPLUG static unsigned long pnv_memory_block_size(void) { - /* - * We map the kernel linear region with 1GB large pages on radix. 
For - * memory hot unplug to work our memory block size must be at least - * this size. - */ - if (radix_enabled()) - return radix_mem_block_size; - else - return 256UL * 1024 * 1024; + return memory_block_size; } #endif @@ -490,9 +504,6 @@ static void __init pnv_setup_machdep_opal(void) static int __init pnv_probe(void) { - if (!of_machine_is_compatible("ibm,powernv")) - return 0; - if (firmware_has_feature(FW_FEATURE_OPAL)) pnv_setup_machdep_opal(); @@ -556,6 +567,7 @@ static long pnv_machine_check_early(struct pt_regs *regs) define_machine(powernv) { .name = "PowerNV", + .compatible = "ibm,powernv", .probe = pnv_probe, .setup_arch = pnv_setup_arch, .init_IRQ = pnv_init_IRQ, @@ -565,7 +577,6 @@ define_machine(powernv) { .progress = pnv_progress, .machine_shutdown = pnv_shutdown, .power_save = NULL, - .calibrate_decr = generic_calibrate_decr, .machine_check_early = pnv_machine_check_early, #ifdef CONFIG_KEXEC_CORE .kexec_cpu_down = pnv_kexec_cpu_down, diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 9e1a25398f98..8f41ef364fc6 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -28,7 +28,7 @@ #include <asm/xive.h> #include <asm/opal.h> #include <asm/runlatch.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/dbell.h> #include <asm/kvm_ppc.h> #include <asm/ppc-opcode.h> @@ -36,6 +36,7 @@ #include <asm/kexec.h> #include <asm/reg.h> #include <asm/powernv.h> +#include <asm/systemcfg.h> #include "powernv.h" @@ -136,7 +137,9 @@ static int pnv_smp_cpu_disable(void) * the generic fixup_irqs. --BenH. */ set_cpu_online(cpu, false); - vdso_data->processorCount--; +#ifdef CONFIG_PPC64_PROC_SYSTEMCFG + systemcfg->processorCount--; +#endif if (cpu == boot_cpuid) boot_cpuid = cpumask_any(cpu_online_mask); if (xive_enabled()) @@ -434,7 +437,7 @@ void __init pnv_smp_init(void) smp_ops = &pnv_smp_ops; #ifdef CONFIG_HOTPLUG_CPU -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_DUMP crash_wake_offline = 1; #endif #endif diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c index 7e98b00ea2e8..393e747541fb 100644 --- a/arch/powerpc/platforms/powernv/subcore.c +++ b/arch/powerpc/platforms/powernv/subcore.c @@ -20,6 +20,8 @@ #include <asm/opal.h> #include <asm/smp.h> +#include <trace/events/ipi.h> + #include "subcore.h" #include "powernv.h" @@ -415,13 +417,16 @@ static DEVICE_ATTR(subcores_per_core, 0644, static int subcore_init(void) { + struct device *dev_root; unsigned pvr_ver; + int rc = 0; pvr_ver = PVR_VER(mfspr(SPRN_PVR)); if (pvr_ver != PVR_POWER8 && pvr_ver != PVR_POWER8E && - pvr_ver != PVR_POWER8NVL) + pvr_ver != PVR_POWER8NVL && + pvr_ver != PVR_HX_C2000) return 0; /* @@ -435,7 +440,11 @@ static int subcore_init(void) set_subcores_per_core(1); - return device_create_file(cpu_subsys.dev_root, - &dev_attr_subcores_per_core); + dev_root = bus_get_dev_root(&cpu_subsys); + if (dev_root) { + rc = device_create_file(dev_root, &dev_attr_subcores_per_core); + put_device(dev_root); + } + return rc; } machine_device_initcall(powernv, subcore_init); diff --git a/arch/powerpc/platforms/powernv/ultravisor.c b/arch/powerpc/platforms/powernv/ultravisor.c index 67c8c4b2d8b1..157d9a8134e4 100644 --- a/arch/powerpc/platforms/powernv/ultravisor.c +++ b/arch/powerpc/platforms/powernv/ultravisor.c @@ -32,15 +32,15 @@ int __init early_init_dt_scan_ultravisor(unsigned long node, const char *uname, static struct memcons *uv_memcons; static ssize_t 
uv_msglog_read(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, char *to, + const struct bin_attribute *bin_attr, char *to, loff_t pos, size_t count) { return memcons_copy(uv_memcons, to, pos, count); } -static struct bin_attribute uv_msglog_attr = { +static struct bin_attribute uv_msglog_attr __ro_after_init = { .attr = {.name = "msglog", .mode = 0400}, - .read = uv_msglog_read + .read_new = uv_msglog_read }; static int __init uv_init(void) diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c index c1bfad56447d..2b47d5a86328 100644 --- a/arch/powerpc/platforms/powernv/vas-fault.c +++ b/arch/powerpc/platforms/powernv/vas-fault.c @@ -77,7 +77,7 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data) /* * VAS can interrupt with multiple page faults. So process all * valid CRBs within fault FIFO until reaches invalid CRB. - * We use CCW[0] and pswid to validate validate CRBs: + * We use CCW[0] and pswid to validate CRBs: * * CCW[0] Reserved bit. When NX pastes CRB, CCW[0]=0 * OS sets this bit to 1 after reading CRB. diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index 0072682531d8..5147df3a18ac 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -1059,7 +1059,7 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, } } else { /* - * Interrupt hanlder or fault window setup failed. Means + * Interrupt handler or fault window setup failed. Means * NX can not generate fault for page fault. So not * opening for user space tx window. */ @@ -1310,8 +1310,8 @@ int vas_win_close(struct vas_window *vwin) /* if send window, drop reference to matching receive window */ if (window->tx_win) { if (window->user_win) { - put_vas_user_win_ref(&vwin->task_ref); mm_context_remove_vas_window(vwin->task_ref.mm); + put_vas_user_win_ref(&vwin->task_ref); } put_rx_win(window->rxwin); } diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig index 610682caabc4..706194e5f0b4 100644 --- a/arch/powerpc/platforms/ps3/Kconfig +++ b/arch/powerpc/platforms/ps3/Kconfig @@ -67,6 +67,7 @@ config PS3_VUART config PS3_PS3AV depends on PPC_PS3 tristate "PS3 AV settings driver" if PS3_ADVANCED + select VIDEO select PS3_VUART default y help @@ -165,18 +166,6 @@ config PS3_LPM If you intend to use the advanced performance monitoring and profiling support of the Cell processor with programs like - oprofile and perfmon2, then say Y or M, otherwise say N. - -config PS3GELIC_UDBG - bool "PS3 udbg output via UDP broadcasts on Ethernet" - depends on PPC_PS3 - help - Enables udbg early debugging output by sending broadcast UDP - via the Ethernet port (UDP port number 18194). - - This driver uses a trivial implementation and is independent - from the main PS3 gelic network driver. - - If in doubt, say N here. + perfmon2, then say Y or M, otherwise say N. 
endmenu diff --git a/arch/powerpc/platforms/ps3/Makefile b/arch/powerpc/platforms/ps3/Makefile index 86bf2967a8d4..bc79bb124d1e 100644 --- a/arch/powerpc/platforms/ps3/Makefile +++ b/arch/powerpc/platforms/ps3/Makefile @@ -3,7 +3,7 @@ obj-y += setup.o mm.o time.o hvcall.o htab.o repository.o obj-y += interrupt.o exports.o os-area.o obj-y += system-bus.o -obj-$(CONFIG_PS3GELIC_UDBG) += gelic_udbg.o +obj-$(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) += gelic_udbg.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SPU_BASE) += spu.o obj-y += device-init.o diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c index e87360a0fb40..22d91ac424dd 100644 --- a/arch/powerpc/platforms/ps3/device-init.c +++ b/arch/powerpc/platforms/ps3/device-init.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/reboot.h> #include <linux/rcuwait.h> +#include <linux/string_choices.h> #include <asm/firmware.h> #include <asm/lv1call.h> @@ -178,7 +179,7 @@ fail_malloc: return result; } -static int __ref ps3_setup_uhc_device( +static int __init ps3_setup_uhc_device( const struct ps3_repository_device *repo, enum ps3_match_id match_id, enum ps3_interrupt_type interrupt_type, enum ps3_reg_type reg_type) { @@ -724,7 +725,7 @@ static irqreturn_t ps3_notification_interrupt(int irq, void *data) static int ps3_notification_read_write(struct ps3_notification_device *dev, u64 lpar, int write) { - const char *op = write ? "write" : "read"; + const char *op = str_write_read(write); unsigned long flags; int res; @@ -770,49 +771,51 @@ static struct task_struct *probe_task; static int ps3_probe_thread(void *data) { - struct ps3_notification_device dev; + struct { + struct ps3_notification_device dev; + u8 buf[512]; + } *local; + struct ps3_notify_cmd *notify_cmd; + struct ps3_notify_event *notify_event; int res; unsigned int irq; u64 lpar; - void *buf; - struct ps3_notify_cmd *notify_cmd; - struct ps3_notify_event *notify_event; pr_debug(" -> %s:%u: kthread started\n", __func__, __LINE__); - buf = kzalloc(512, GFP_KERNEL); - if (!buf) + local = kzalloc(sizeof(*local), GFP_KERNEL); + if (!local) return -ENOMEM; - lpar = ps3_mm_phys_to_lpar(__pa(buf)); - notify_cmd = buf; - notify_event = buf; + lpar = ps3_mm_phys_to_lpar(__pa(&local->buf)); + notify_cmd = (struct ps3_notify_cmd *)&local->buf; + notify_event = (struct ps3_notify_event *)&local->buf; /* dummy system bus device */ - dev.sbd.bus_id = (u64)data; - dev.sbd.dev_id = PS3_NOTIFICATION_DEV_ID; - dev.sbd.interrupt_id = PS3_NOTIFICATION_INTERRUPT_ID; + local->dev.sbd.bus_id = (u64)data; + local->dev.sbd.dev_id = PS3_NOTIFICATION_DEV_ID; + local->dev.sbd.interrupt_id = PS3_NOTIFICATION_INTERRUPT_ID; - res = lv1_open_device(dev.sbd.bus_id, dev.sbd.dev_id, 0); + res = lv1_open_device(local->dev.sbd.bus_id, local->dev.sbd.dev_id, 0); if (res) { pr_err("%s:%u: lv1_open_device failed %s\n", __func__, __LINE__, ps3_result(res)); goto fail_free; } - res = ps3_sb_event_receive_port_setup(&dev.sbd, PS3_BINDING_CPU_ANY, - &irq); + res = ps3_sb_event_receive_port_setup(&local->dev.sbd, + PS3_BINDING_CPU_ANY, &irq); if (res) { pr_err("%s:%u: ps3_sb_event_receive_port_setup failed %d\n", __func__, __LINE__, res); goto fail_close_device; } - spin_lock_init(&dev.lock); - rcuwait_init(&dev.wait); + spin_lock_init(&local->dev.lock); + rcuwait_init(&local->dev.wait); res = request_irq(irq, ps3_notification_interrupt, 0, - "ps3_notification", &dev); + "ps3_notification", &local->dev); if (res) { pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__, res); @@ 
-823,46 +826,48 @@ static int ps3_probe_thread(void *data) notify_cmd->operation_code = 0; /* must be zero */ notify_cmd->event_mask = 1UL << notify_region_probe; - res = ps3_notification_read_write(&dev, lpar, 1); + res = ps3_notification_read_write(&local->dev, lpar, 1); if (res) goto fail_free_irq; + set_freezable(); /* Loop here processing the requested notification events. */ do { try_to_freeze(); memset(notify_event, 0, sizeof(*notify_event)); - res = ps3_notification_read_write(&dev, lpar, 0); + res = ps3_notification_read_write(&local->dev, lpar, 0); if (res) break; pr_debug("%s:%u: notify event type 0x%llx bus id %llu dev id %llu" " type %llu port %llu\n", __func__, __LINE__, - notify_event->event_type, notify_event->bus_id, - notify_event->dev_id, notify_event->dev_type, - notify_event->dev_port); + notify_event->event_type, notify_event->bus_id, + notify_event->dev_id, notify_event->dev_type, + notify_event->dev_port); if (notify_event->event_type != notify_region_probe || - notify_event->bus_id != dev.sbd.bus_id) { + notify_event->bus_id != local->dev.sbd.bus_id) { pr_warn("%s:%u: bad notify_event: event %llu, dev_id %llu, dev_type %llu\n", __func__, __LINE__, notify_event->event_type, notify_event->dev_id, notify_event->dev_type); continue; } - ps3_find_and_add_device(dev.sbd.bus_id, notify_event->dev_id); + ps3_find_and_add_device(local->dev.sbd.bus_id, + notify_event->dev_id); } while (!kthread_should_stop()); fail_free_irq: - free_irq(irq, &dev); + free_irq(irq, &local->dev); fail_sb_event_receive_port_destroy: - ps3_sb_event_receive_port_destroy(&dev.sbd, irq); + ps3_sb_event_receive_port_destroy(&local->dev.sbd, irq); fail_close_device: - lv1_close_device(dev.sbd.bus_id, dev.sbd.dev_id); + lv1_close_device(local->dev.sbd.bus_id, local->dev.sbd.dev_id); fail_free: - kfree(buf); + kfree(local); probe_task = NULL; diff --git a/arch/powerpc/platforms/ps3/gelic_udbg.c b/arch/powerpc/platforms/ps3/gelic_udbg.c index 6b298010fd84..a5202c18c236 100644 --- a/arch/powerpc/platforms/ps3/gelic_udbg.c +++ b/arch/powerpc/platforms/ps3/gelic_udbg.c @@ -14,6 +14,7 @@ #include <linux/ip.h> #include <linux/udp.h> +#include <asm/ps3.h> #include <asm/io.h> #include <asm/udbg.h> #include <asm/lv1call.h> diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c index c27e6cf85272..9de62bd52650 100644 --- a/arch/powerpc/platforms/ps3/htab.c +++ b/arch/powerpc/platforms/ps3/htab.c @@ -146,7 +146,7 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, int psize, int ssize) { - panic("ps3_hpte_updateboltedpp() not implemented"); + pr_info("ps3_hpte_updateboltedpp() not implemented"); } static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn, diff --git a/arch/powerpc/platforms/ps3/hvcall.S b/arch/powerpc/platforms/ps3/hvcall.S index 509e30ad01bb..e8ab3d6b03bd 100644 --- a/arch/powerpc/platforms/ps3/hvcall.S +++ b/arch/powerpc/platforms/ps3/hvcall.S @@ -9,6 +9,7 @@ #include <asm/processor.h> #include <asm/ppc_asm.h> +#include <asm/ptrace.h> #define lv1call .long 0x44000022; extsw r3, r3 @@ -16,12 +17,14 @@ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ + stdu r1, -STACK_FRAME_MIN_SIZE(r1); \ li r11, API_NUMBER; \ lv1call; \ + addi r1, r1, STACK_FRAME_MIN_SIZE; \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -38,18 +41,19 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, 
LRSAVE(r1); \ \ - stdu r3, -8(r1); \ + std r3, -8(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 8; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -57,21 +61,22 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r3, -8(r1); \ - stdu r4, -16(r1); \ + std r4, -16(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 16; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -79,16 +84,17 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r3, -8(r1); \ std r4, -16(r1); \ - stdu r5, -24(r1); \ + std r5, -24(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-24(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 24; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+24; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -96,7 +102,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -24(r1); \ std r6, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -104,7 +110,7 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r3, -8(r1); \ std r4, -16(r1); \ @@ -112,12 +118,13 @@ _GLOBAL(_##API_NAME) \ std r6, -32(r1); \ std r7, -40(r1); \ std r8, -48(r1); \ - stdu r9, -56(r1); \ + std r9, -56(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-56(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 56; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+56; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -133,7 +140,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -56(r1); \ std r10, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -141,18 +148,19 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ - stdu r4, -8(r1); \ + std r4, -8(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 8; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -160,21 +168,22 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r4, -8(r1); \ - stdu r5, -16(r1); \ + std r5, -16(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 16; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -182,16 +191,17 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r4, -8(r1); \ std r5, -16(r1); \ - stdu r6, -24(r1); \ + std r6, -24(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-24(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 24; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+24; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -199,7 +209,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -24(r1); \ std r6, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -207,17 +217,18 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r4, -8(r1); \ std r5, -16(r1); \ std r6, -24(r1); \ - stdu r7, -32(r1); \ 
+ std r7, -32(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-32(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 32; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+32; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -227,7 +238,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -32(r1); \ std r7, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -235,18 +246,19 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r4, -8(r1); \ std r5, -16(r1); \ std r6, -24(r1); \ std r7, -32(r1); \ - stdu r8, -40(r1); \ + std r8, -40(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-40(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 40; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+40; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -258,7 +270,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -40(r1); \ std r8, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -266,19 +278,20 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r4, -8(r1); \ std r5, -16(r1); \ std r6, -24(r1); \ std r7, -32(r1); \ std r8, -40(r1); \ - stdu r9, -48(r1); \ + std r9, -48(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-48(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 48; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+48; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -292,7 +305,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -48(r1); \ std r9, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -300,7 +313,7 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r4, -8(r1); \ std r5, -16(r1); \ @@ -308,12 +321,13 @@ _GLOBAL(_##API_NAME) \ std r7, -32(r1); \ std r8, -40(r1); \ std r9, -48(r1); \ - stdu r10, -56(r1); \ + std r10, -56(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-56(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 56; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+56; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -329,7 +343,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -56(r1); \ std r10, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -337,18 +351,19 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ - stdu r5, -8(r1); \ + std r5, -8(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 8; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -356,21 +371,22 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r5, -8(r1); \ - stdu r6, -16(r1); \ + std r6, -16(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 16; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -378,16 +394,17 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r5, -8(r1); \ std r6, -16(r1); \ - stdu r7, -24(r1); \ + std r7, -24(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-24(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 24; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+24; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -395,7 +412,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -24(r1); \ std r6, 0(r11); \ \ - ld r0, 
16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -403,17 +420,18 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r5, -8(r1); \ std r6, -16(r1); \ std r7, -24(r1); \ - stdu r8, -32(r1); \ + std r8, -32(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-32(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 32; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+32;\ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -423,7 +441,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -32(r1); \ std r7, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -431,18 +449,19 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r5, -8(r1); \ std r6, -16(r1); \ std r7, -24(r1); \ std r8, -32(r1); \ - stdu r9, -40(r1); \ + std r9, -40(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-40(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 40; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+40; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -454,7 +473,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -40(r1); \ std r8, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -462,18 +481,19 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ - stdu r6, -8(r1); \ + std r6, -8(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 8; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -481,21 +501,22 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r6, -8(r1); \ - stdu r7, -16(r1); \ + std r7, -16(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 16; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -503,16 +524,17 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r6, -8(r1); \ std r7, -16(r1); \ - stdu r8, -24(r1); \ + std r8, -24(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-24(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 24; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+24; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -520,7 +542,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -24(r1); \ std r6, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -528,18 +550,19 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ - stdu r7, -8(r1); \ + std r7, -8(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 8; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -547,21 +570,22 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r7, -8(r1); \ - stdu r8, -16(r1); \ + std r8, -16(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 16; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -569,16 +593,17 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std 
r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r7, -8(r1); \ std r8, -16(r1); \ - stdu r9, -24(r1); \ + std r9, -24(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-24(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 24; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+24; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -586,7 +611,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -24(r1); \ std r6, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -594,18 +619,19 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ - stdu r8, -8(r1); \ + std r8, -8(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 8; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -613,21 +639,22 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r8, -8(r1); \ - stdu r9, -16(r1); \ + std r9, -16(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 16; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -635,16 +662,17 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r8, -8(r1); \ std r9, -16(r1); \ - stdu r10, -24(r1); \ + std r10, -24(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-24(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 24; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+24; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ @@ -652,7 +680,7 @@ _GLOBAL(_##API_NAME) \ ld r11, -24(r1); \ std r6, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -660,18 +688,19 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ - stdu r9, -8(r1); \ + std r9, -8(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 8; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -679,21 +708,22 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r9, -8(r1); \ - stdu r10, -16(r1); \ + std r10, -16(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 16; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -701,23 +731,24 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ std r9, -8(r1); \ - stdu r10, -16(r1); \ + std r10, -16(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 16; \ + addi r1, r1, STACK_FRAME_MIN_SIZE+16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ - ld r11, 48+8*8(r1); \ + ld r11, STK_PARAM_AREA+8*8(r1); \ std r6, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -725,18 +756,19 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ - stdu r10, -8(r1); \ + std r10, -8(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - addi r1, r1, 8; \ + 
addi r1, r1, STACK_FRAME_MIN_SIZE+8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -744,27 +776,29 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ \ - std r10, 48+8*7(r1); \ + std r10, STK_PARAM_AREA+8*7(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - ld r11, 48+8*7(r1); \ + addi r1, r1, STACK_FRAME_MIN_SIZE; \ + ld r11, STK_PARAM_AREA+8*7(r1); \ std r4, 0(r11); \ - ld r11, 48+8*8(r1); \ + ld r11, STK_PARAM_AREA+8*8(r1); \ std r5, 0(r11); \ - ld r11, 48+8*9(r1); \ + ld r11, STK_PARAM_AREA+8*9(r1); \ std r6, 0(r11); \ - ld r11, 48+8*10(r1); \ + ld r11, STK_PARAM_AREA+8*10(r1); \ std r7, 0(r11); \ - ld r11, 48+8*11(r1); \ + ld r11, STK_PARAM_AREA+8*11(r1); \ std r8, 0(r11); \ - ld r11, 48+8*12(r1); \ + ld r11, STK_PARAM_AREA+8*12(r1); \ std r9, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr @@ -772,15 +806,17 @@ _GLOBAL(_##API_NAME) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ - std r0, 16(r1); \ + std r0, LRSAVE(r1); \ + stdu r1, -STACK_FRAME_MIN_SIZE(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ - ld r11, 48+8*8(r1); \ + addi r1, r1, STACK_FRAME_MIN_SIZE; \ + ld r11, STK_PARAM_AREA+8*8(r1); \ std r4, 0(r11); \ \ - ld r0, 16(r1); \ + ld r0, LRSAVE(r1); \ mtlr r0; \ blr diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c index 49871427f599..a4ad4b49eef7 100644 --- a/arch/powerpc/platforms/ps3/interrupt.c +++ b/arch/powerpc/platforms/ps3/interrupt.c @@ -378,9 +378,9 @@ int ps3_send_event_locally(unsigned int virq) /** * ps3_sb_event_receive_port_setup - Setup a system bus event receive port. + * @dev: The system bus device instance. * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be * serviced on. - * @dev: The system bus device instance. * @virq: The assigned Linux virq. * * An event irq represents a virtual device interrupt. The interrupt_id @@ -743,8 +743,8 @@ void __init ps3_init_IRQ(void) unsigned cpu; struct irq_domain *host; - host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL); - irq_set_default_host(host); + host = irq_domain_create_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL); + irq_set_default_domain(host); for_each_possible_cpu(cpu) { struct ps3_private *pd = &per_cpu(ps3_private, cpu); diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c index 205763061a2d..b8c030eab138 100644 --- a/arch/powerpc/platforms/ps3/repository.c +++ b/arch/powerpc/platforms/ps3/repository.c @@ -73,9 +73,9 @@ static void _dump_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4, static u64 make_first_field(const char *text, u64 index) { - u64 n; + u64 n = 0; - strncpy((char *)&n, text, 8); + memcpy((char *)&n, text, strnlen(text, sizeof(n))); return PS3_VENDOR_ID_NONE + (n >> 32) + index; } @@ -940,7 +940,7 @@ int __init ps3_repository_read_vuart_sysmgr_port(unsigned int *port) /** * ps3_repository_read_boot_dat_info - Get address and size of cell_ext_os_area. 
- * address: lpar address of cell_ext_os_area + * @lpar_addr: lpar address of cell_ext_os_area * @size: size of cell_ext_os_area */ diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index d7495785fe47..150c09b58ae8 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -115,10 +115,7 @@ static void __init prealloc(struct ps3_prealloc *p) if (!p->size) return; - p->address = memblock_alloc(p->size, p->align); - if (!p->address) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, p->size, p->align); + p->address = memblock_alloc_or_panic(p->size, p->align); printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size, p->address); @@ -264,9 +261,6 @@ static int __init ps3_probe(void) { DBG(" -> %s:%d\n", __func__, __LINE__); - if (!of_machine_is_compatible("sony,ps3")) - return 0; - ps3_os_area_save_params(); pm_power_off = ps3_power_off; @@ -291,6 +285,7 @@ static void ps3_kexec_cpu_down(int crash_shutdown, int secondary) define_machine(ps3) { .name = "PS3", + .compatible = "sony,ps3", .probe = ps3_probe, .setup_arch = ps3_setup_arch, .init_IRQ = ps3_init_IRQ, diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c index 4a2520ec6d7f..61b37c9400b2 100644 --- a/arch/powerpc/platforms/ps3/spu.c +++ b/arch/powerpc/platforms/ps3/spu.c @@ -190,10 +190,10 @@ static void spu_unmap(struct spu *spu) static int __init setup_areas(struct spu *spu) { struct table {char* name; unsigned long addr; unsigned long size;}; - unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO)); spu_pdata(spu)->shadow = ioremap_prot(spu_pdata(spu)->shadow_addr, - sizeof(struct spe_shadow), shadow_flags); + sizeof(struct spe_shadow), + pgprot_noncached_wc(PAGE_KERNEL_RO)); if (!spu_pdata(spu)->shadow) { pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__); goto fail_ioremap; diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 2502e9b17df4..afbaabf182d0 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -333,10 +333,10 @@ int ps3_mmio_region_init(struct ps3_system_bus_device *dev, EXPORT_SYMBOL_GPL(ps3_mmio_region_init); static int ps3_system_bus_match(struct device *_dev, - struct device_driver *_drv) + const struct device_driver *_drv) { int result; - struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv); + const struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv); struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); if (!dev->match_sub_id) @@ -439,7 +439,7 @@ static void ps3_system_bus_shutdown(struct device *_dev) dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__); } -static int ps3_system_bus_uevent(struct device *_dev, struct kobj_uevent_env *env) +static int ps3_system_bus_uevent(const struct device *_dev, struct kobj_uevent_env *env) { struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); @@ -453,10 +453,9 @@ static ssize_t modalias_show(struct device *_dev, struct device_attribute *a, char *buf) { struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); - int len = snprintf(buf, PAGE_SIZE, "ps3:%d:%d\n", dev->match_id, - dev->match_sub_id); - return (len >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : len; + return sysfs_emit(buf, "ps3:%d:%d\n", dev->match_id, + dev->match_sub_id); } static DEVICE_ATTR_RO(modalias); @@ -466,7 +465,7 @@ static struct attribute *ps3_system_bus_dev_attrs[] = { }; ATTRIBUTE_GROUPS(ps3_system_bus_dev); -struct bus_type ps3_system_bus_type = { +static struct bus_type ps3_system_bus_type = { .name = "ps3_system_bus", .match = ps3_system_bus_match, .uevent = ps3_system_bus_uevent, @@ -695,7 +694,7 @@ static const struct dma_map_ops ps3_sb_dma_ops = { .unmap_page = ps3_unmap_page, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; @@ -709,7 +708,7 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = { .unmap_page = ps3_unmap_page, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index f4a647c1f0b2..fa3c2fff082a 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -7,6 +7,7 @@ config PPC_PSERIES select OF_DYNAMIC select FORCE_PCI select PCI_MSI + select GENERIC_ALLOCATOR select PPC_XICS select PPC_XIVE_SPAPR select PPC_ICP_NATIVE @@ -21,15 +22,25 @@ config PPC_PSERIES select HOTPLUG_CPU select FORCE_SMP select SWIOTLB + select ARCH_SUPPORTS_PER_VMA_LOCK + select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU default y +config PARAVIRT + bool + config PARAVIRT_SPINLOCKS bool +config PARAVIRT_TIME_ACCOUNTING + select PARAVIRT + bool + config PPC_SPLPAR bool "Support for shared-processor logical partitions" depends on PPC_PSERIES select PARAVIRT_SPINLOCKS if PPC_QUEUED_SPINLOCKS + select PARAVIRT_TIME_ACCOUNTING if VIRT_CPU_ACCOUNTING_GEN default y help Enabling this option will make the kernel run more efficiently @@ -118,6 +129,15 @@ config CMM will be reused for other LPARs. The interface allows firmware to balance memory across many LPARs. +config HTMDUMP + tristate "PowerVM data dumper" + depends on PPC_PSERIES && DEBUG_FS + default m + help + Select this option, if you want to enable the kernel debugfs + interface to dump the Hardware Trace Macro (HTM) function data + in the LPAR. + config HV_PERF_CTRS bool "Hypervisor supplied PMU events (24x7 & GPCI)" default y @@ -130,6 +150,20 @@ config HV_PERF_CTRS If unsure, select Y. +config VPA_PMU + tristate "VPA PMU events" + depends on KVM_BOOK3S_64_HV && HV_PERF_CTRS + help + Enable access to the VPA PMU counters via perf. This enables + code that support measurement for KVM on PowerVM(KoP) feature. + PAPR hypervisor has introduced three new counters in the VPA area + of LPAR CPUs for KVM L2 guest observability. Two for context switches + from host to guest and vice versa, and one counter for getting + the total time spent inside the KVM guest. This config enables code + that access these software counters via perf. + + If unsure, Select N. + config IBMVIO depends on PPC_PSERIES bool @@ -141,6 +175,25 @@ config IBMEBUS help Bus device driver for GX bus based adapters. +config PSERIES_PLPKS + depends on PPC_PSERIES + select NLS + bool + # PowerVM provides an isolated Platform Keystore (PKS) storage + # allocation for each LPAR with individually managed access + # controls to store sensitive information securely. 
It can be + # used to store asymmetric public keys or secrets as required + # by different usecases. + # + # This option is selected by in-kernel consumers that require + # access to the PKS. + +config PSERIES_PLPKS_SED + depends on PPC_PSERIES + bool + # This option is selected by in-kernel consumers that require + # access to the SED PKS keystore. + config PAPR_SCM depends on PPC_PSERIES && MEMORY_HOTPLUG && LIBNVDIMM tristate "Support for the PAPR Storage Class Memory interface" diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 7aaff5323544..57222678bb3f 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -1,13 +1,14 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG obj-y := lpar.o hvCall.o nvram.o reconfig.o \ - of_helpers.o \ + of_helpers.o rtas-work-area.o papr-sysparm.o \ + papr-rtas-common.o papr-vpd.o papr-indices.o \ + papr-platform-dump.o papr-phy-attest.o \ setup.o iommu.o event_sources.o ras.o \ firmware.o power.o dlpar.o mobility.o rng.o \ pci.o pci_dlpar.o eeh_pseries.o msi.o \ - papr_platform_attributes.o + papr_platform_attributes.o dtl.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_KEXEC_CORE) += kexec.o obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o @@ -19,7 +20,7 @@ obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o obj-$(CONFIG_HVCS) += hvcserver.o obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o obj-$(CONFIG_CMM) += cmm.o -obj-$(CONFIG_DTL) += dtl.o +obj-$(CONFIG_HTMDUMP) += htmdump.o obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o obj-$(CONFIG_LPARCFG) += lparcfg.o obj-$(CONFIG_IBMVIO) += vio.o @@ -28,7 +29,9 @@ obj-$(CONFIG_PAPR_SCM) += papr_scm.o obj-$(CONFIG_PPC_SPLPAR) += vphn.o obj-$(CONFIG_PPC_SVM) += svm.o obj-$(CONFIG_FA_DUMP) += rtas-fadump.o - +obj-$(CONFIG_PSERIES_PLPKS) += plpks.o +obj-$(CONFIG_PPC_SECURE_BOOT) += plpks-secvar.o +obj-$(CONFIG_PSERIES_PLPKS_SED) += plpks_sed_ops.o obj-$(CONFIG_SUSPEND) += suspend.o obj-$(CONFIG_PPC_VAS) += vas.o vas-sysfs.o diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 498d6efcb5ae..213aa26dc8b3 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -22,6 +22,8 @@ #include <asm/machdep.h> #include <linux/uaccess.h> #include <asm/rtas.h> +#include <asm/rtas-work-area.h> +#include <asm/prom.h> static struct workqueue_struct *pseries_hp_wq; @@ -137,37 +139,27 @@ struct device_node *dlpar_configure_connector(__be32 drc_index, struct property *property; struct property *last_property = NULL; struct cc_workarea *ccwa; + struct rtas_work_area *work_area; char *data_buf; int cc_token; int rc = -1; - cc_token = rtas_token("ibm,configure-connector"); + cc_token = rtas_function_token(RTAS_FN_IBM_CONFIGURE_CONNECTOR); if (cc_token == RTAS_UNKNOWN_SERVICE) return NULL; - data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); - if (!data_buf) - return NULL; + work_area = rtas_work_area_alloc(SZ_4K); + data_buf = rtas_work_area_raw_buf(work_area); ccwa = (struct cc_workarea *)&data_buf[0]; ccwa->drc_index = drc_index; ccwa->zero = 0; do { - /* Since we release the rtas_data_buf lock between configure - * connector calls we want to re-populate the rtas_data_buffer - * with the contents of the previous call. 
- */ - spin_lock(&rtas_data_buf_lock); - - memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE); - rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); - memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE); - - spin_unlock(&rtas_data_buf_lock); - - if (rtas_busy_delay(rc)) - continue; + do { + rc = rtas_call(cc_token, 2, 1, NULL, + rtas_work_area_phys(work_area), NULL); + } while (rtas_busy_delay(rc)); switch (rc) { case COMPLETE: @@ -227,7 +219,7 @@ struct device_node *dlpar_configure_connector(__be32 drc_index, } while (rc); cc_error: - kfree(data_buf); + rtas_work_area_free(work_area); if (rc) { if (first_dn) @@ -259,11 +251,8 @@ int dlpar_detach_node(struct device_node *dn) struct device_node *child; int rc; - child = of_get_next_child(dn, NULL); - while (child) { + for_each_child_of_node(dn, child) dlpar_detach_node(child); - child = of_get_next_child(dn, child); - } rc = of_detach_node(dn); if (rc) @@ -273,6 +262,20 @@ int dlpar_detach_node(struct device_node *dn) return 0; } +static int dlpar_changeset_attach_cc_nodes(struct of_changeset *ocs, + struct device_node *dn) +{ + int rc; + + rc = of_changeset_attach_node(ocs, dn); + + if (!rc && dn->child) + rc = dlpar_changeset_attach_cc_nodes(ocs, dn->child); + if (!rc && dn->sibling) + rc = dlpar_changeset_attach_cc_nodes(ocs, dn->sibling); + + return rc; +} #define DR_ENTITY_SENSE 9003 #define DR_ENTITY_PRESENT 1 @@ -339,27 +342,206 @@ int dlpar_unisolate_drc(u32 drc_index) return 0; } -int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) +static struct device_node * +get_device_node_with_drc_index(u32 index) +{ + struct device_node *np = NULL; + u32 node_index; + int rc; + + for_each_node_with_property(np, "ibm,my-drc-index") { + rc = of_property_read_u32(np, "ibm,my-drc-index", + &node_index); + if (rc) { + pr_err("%s: %pOF: of_property_read_u32 %s: %d\n", + __func__, np, "ibm,my-drc-index", rc); + of_node_put(np); + return NULL; + } + + if (index == node_index) + break; + } + + return np; +} + +static struct device_node * +get_device_node_with_drc_info(u32 index) +{ + struct device_node *np = NULL; + struct of_drc_info drc; + struct property *info; + const __be32 *value; + u32 node_index; + int i, j, count; + + for_each_node_with_property(np, "ibm,drc-info") { + info = of_find_property(np, "ibm,drc-info", NULL); + if (info == NULL) { + /* XXX can this happen? */ + of_node_put(np); + return NULL; + } + value = of_prop_next_u32(info, NULL, &count); + if (value == NULL) + continue; + value++; + for (i = 0; i < count; i++) { + if (of_read_drc_info_cell(&info, &value, &drc)) + break; + if (index > drc.last_drc_index) + continue; + node_index = drc.drc_index_start; + for (j = 0; j < drc.num_sequential_elems; j++) { + if (index == node_index) + return np; + node_index += drc.sequential_inc; + } + } + } + + return NULL; +} + +static int dlpar_hp_dt_add(u32 index) +{ + struct device_node *np, *nodes; + struct of_changeset ocs; + int rc; + + /* + * Do not add device node(s) if already exists in the + * device tree. + */ + np = get_device_node_with_drc_index(index); + if (np) { + pr_err("%s: Adding device node for index (%d), but " + "already exists in the device tree\n", + __func__, index); + rc = -EINVAL; + goto out; + } + + np = get_device_node_with_drc_info(index); + + if (!np) + return -EIO; + + /* Next, configure the connector. */ + nodes = dlpar_configure_connector(cpu_to_be32(index), np); + if (!nodes) { + rc = -EIO; + goto out; + } + + /* + * Add the new nodes from dlpar_configure_connector() onto + * the device-tree. 
+ */ + of_changeset_init(&ocs); + rc = dlpar_changeset_attach_cc_nodes(&ocs, nodes); + + if (!rc) + rc = of_changeset_apply(&ocs); + else + dlpar_free_cc_nodes(nodes); + + of_changeset_destroy(&ocs); + +out: + of_node_put(np); + return rc; +} + +static int changeset_detach_node_recursive(struct of_changeset *ocs, + struct device_node *node) +{ + struct device_node *child; + int rc; + + for_each_child_of_node(node, child) { + rc = changeset_detach_node_recursive(ocs, child); + if (rc) { + of_node_put(child); + return rc; + } + } + + return of_changeset_detach_node(ocs, node); +} + +static int dlpar_hp_dt_remove(u32 drc_index) +{ + struct device_node *np; + struct of_changeset ocs; + u32 index; + int rc = 0; + + /* + * Prune all nodes with a matching index. + */ + of_changeset_init(&ocs); + + for_each_node_with_property(np, "ibm,my-drc-index") { + rc = of_property_read_u32(np, "ibm,my-drc-index", &index); + if (rc) { + pr_err("%s: %pOF: of_property_read_u32 %s: %d\n", + __func__, np, "ibm,my-drc-index", rc); + of_node_put(np); + goto out; + } + + if (index == drc_index) { + rc = changeset_detach_node_recursive(&ocs, np); + if (rc) { + of_node_put(np); + goto out; + } + } + } + + rc = of_changeset_apply(&ocs); + +out: + of_changeset_destroy(&ocs); + return rc; +} + +static int dlpar_hp_dt(struct pseries_hp_errorlog *phpe) { + u32 drc_index; int rc; - /* pseries error logs are in BE format, convert to cpu type */ - switch (hp_elog->id_type) { - case PSERIES_HP_ELOG_ID_DRC_COUNT: - hp_elog->_drc_u.drc_count = - be32_to_cpu(hp_elog->_drc_u.drc_count); + if (phpe->id_type != PSERIES_HP_ELOG_ID_DRC_INDEX) + return -EINVAL; + + drc_index = be32_to_cpu(phpe->_drc_u.drc_index); + + lock_device_hotplug(); + + switch (phpe->action) { + case PSERIES_HP_ELOG_ACTION_ADD: + rc = dlpar_hp_dt_add(drc_index); break; - case PSERIES_HP_ELOG_ID_DRC_INDEX: - hp_elog->_drc_u.drc_index = - be32_to_cpu(hp_elog->_drc_u.drc_index); + case PSERIES_HP_ELOG_ACTION_REMOVE: + rc = dlpar_hp_dt_remove(drc_index); + break; + default: + pr_err("Invalid action (%d) specified\n", phpe->action); + rc = -EINVAL; break; - case PSERIES_HP_ELOG_ID_DRC_IC: - hp_elog->_drc_u.ic.count = - be32_to_cpu(hp_elog->_drc_u.ic.count); - hp_elog->_drc_u.ic.index = - be32_to_cpu(hp_elog->_drc_u.ic.index); } + unlock_device_hotplug(); + + return rc; +} + +int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) +{ + int rc; + switch (hp_elog->resource) { case PSERIES_HP_ELOG_RESOURCE_MEM: rc = dlpar_memory(hp_elog); @@ -370,6 +552,9 @@ int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) case PSERIES_HP_ELOG_RESOURCE_PMEM: rc = dlpar_hp_pmem(hp_elog); break; + case PSERIES_HP_ELOG_RESOURCE_DT: + rc = dlpar_hp_dt(hp_elog); + break; default: pr_warn_ratelimited("Invalid resource (%d) specified\n", @@ -422,6 +607,8 @@ static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog) hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM; } else if (sysfs_streq(arg, "cpu")) { hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU; + } else if (sysfs_streq(arg, "dt")) { + hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_DT; } else { pr_err("Invalid resource specified.\n"); return -EINVAL; @@ -521,7 +708,7 @@ static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog) return 0; } -static ssize_t dlpar_store(struct class *class, struct class_attribute *attr, +static ssize_t dlpar_store(const struct class *class, const struct class_attribute *attr, const char *buf, size_t count) { struct pseries_hp_errorlog hp_elog; @@ 
-560,10 +747,10 @@ dlpar_store_out: return rc ? rc : count; } -static ssize_t dlpar_show(struct class *class, struct class_attribute *attr, +static ssize_t dlpar_show(const struct class *class, const struct class_attribute *attr, char *buf) { - return sprintf(buf, "%s\n", "memory,cpu"); + return sprintf(buf, "%s\n", "memory,cpu,dt"); } static CLASS_ATTR_RW(dlpar); @@ -573,8 +760,7 @@ int __init dlpar_workqueue_init(void) if (pseries_hp_wq) return 0; - pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", - WQ_UNBOUND, 1); + pseries_hp_wq = alloc_ordered_workqueue("pseries hotplug workqueue", 0); return pseries_hp_wq ? 0 : -ENOMEM; } diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 352af5b14a0f..f293588b8c7b 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -18,6 +18,7 @@ #include <asm/plpar_wrappers.h> #include <asm/machdep.h> +#ifdef CONFIG_DTL struct dtl { struct dtl_entry *buf; int cpu; @@ -37,6 +38,15 @@ static u8 dtl_event_mask = DTL_LOG_ALL; static int dtl_buf_entries = N_DISPATCH_LOG; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + +/* + * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls + * reading from the dispatch trace log. If other code wants to consume + * DTL entries, it can set this pointer to a function that will get + * called once for each DTL entry that gets processed. + */ +static void (*dtl_consumer)(struct dtl_entry *entry, u64 index); + struct dtl_ring { u64 write_index; struct dtl_entry *write_ptr; @@ -181,7 +191,7 @@ static int dtl_enable(struct dtl *dtl) return -EBUSY; /* ensure there are no other conflicting dtl users */ - if (!read_trylock(&dtl_access_lock)) + if (!down_read_trylock(&dtl_access_lock)) return -EBUSY; n_entries = dtl_buf_entries; @@ -189,7 +199,7 @@ static int dtl_enable(struct dtl *dtl) if (!buf) { printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n", __func__, dtl->cpu); - read_unlock(&dtl_access_lock); + up_read(&dtl_access_lock); return -ENOMEM; } @@ -207,7 +217,7 @@ static int dtl_enable(struct dtl *dtl) spin_unlock(&dtl->lock); if (rc) { - read_unlock(&dtl_access_lock); + up_read(&dtl_access_lock); kmem_cache_free(dtl_cache, buf); } @@ -222,7 +232,7 @@ static void dtl_disable(struct dtl *dtl) dtl->buf = NULL; dtl->buf_entries = 0; spin_unlock(&dtl->lock); - read_unlock(&dtl_access_lock); + up_read(&dtl_access_lock); } /* file interface */ @@ -315,7 +325,6 @@ static const struct file_operations dtl_fops = { .open = dtl_file_open, .release = dtl_file_release, .read = dtl_file_read, - .llseek = no_llseek, }; static struct dentry *dtl_dir; @@ -355,3 +364,81 @@ static int dtl_init(void) return 0; } machine_arch_initcall(pseries, dtl_init); +#endif /* CONFIG_DTL */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +/* + * Scan the dispatch trace log and count up the stolen time. + * Should be called with interrupts disabled. 
+ */ +static notrace u64 scan_dispatch_log(u64 stop_tb) +{ + u64 i = local_paca->dtl_ridx; + struct dtl_entry *dtl = local_paca->dtl_curr; + struct dtl_entry *dtl_end = local_paca->dispatch_log_end; + struct lppaca *vpa = local_paca->lppaca_ptr; + u64 tb_delta; + u64 stolen = 0; + u64 dtb; + + if (!dtl) + return 0; + + if (i == be64_to_cpu(vpa->dtl_idx)) + return 0; + while (i < be64_to_cpu(vpa->dtl_idx)) { + dtb = be64_to_cpu(dtl->timebase); + tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) + + be32_to_cpu(dtl->ready_to_enqueue_time); + barrier(); + if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) { + /* buffer has overflowed */ + i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG; + dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); + continue; + } + if (dtb > stop_tb) + break; +#ifdef CONFIG_DTL + if (dtl_consumer) + dtl_consumer(dtl, i); +#endif + stolen += tb_delta; + ++i; + ++dtl; + if (dtl == dtl_end) + dtl = local_paca->dispatch_log; + } + local_paca->dtl_ridx = i; + local_paca->dtl_curr = dtl; + return stolen; +} + +/* + * Accumulate stolen time by scanning the dispatch trace log. + * Called on entry from user mode. + */ +void notrace pseries_accumulate_stolen_time(void) +{ + u64 sst, ust; + struct cpu_accounting_data *acct = &local_paca->accounting; + + sst = scan_dispatch_log(acct->starttime_user); + ust = scan_dispatch_log(acct->starttime); + acct->stime -= sst; + acct->utime -= ust; + acct->steal_time += ust + sst; +} + +u64 pseries_calculate_stolen_time(u64 stop_tb) +{ + if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + return 0; + + if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) + return scan_dispatch_log(stop_tb); + + return 0; +} + +#endif diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 1b0c901a6f3b..b12ef382fec7 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -71,7 +71,7 @@ static void pseries_pcibios_bus_add_device(struct pci_dev *pdev) if (pdev->is_virtfn) { /* * FIXME: This really should be handled by choosing the right - * parent PE in in pseries_eeh_init_edev(). + * parent PE in pseries_eeh_init_edev(). 
*/ struct eeh_pe *physfn_pe = pci_dev_to_eeh_dev(pdev->physfn)->pe; struct eeh_dev *edev = pdn_to_eeh_dev(pdn); @@ -154,7 +154,7 @@ static int pseries_eeh_get_pe_config_addr(struct pci_dn *pdn) /** * pseries_eeh_phb_reset - Reset the specified PHB * @phb: PCI controller - * @config_adddr: the associated config address + * @config_addr: the associated config address * @option: reset option * * Reset the specified PHB/PE @@ -188,7 +188,7 @@ static int pseries_eeh_phb_reset(struct pci_controller *phb, int config_addr, in /** * pseries_eeh_phb_configure_bridge - Configure PCI bridges in the indicated PE * @phb: PCI controller - * @config_adddr: the associated config address + * @config_addr: the associated config address * * The function will be called to reconfigure the bridges included * in the specified PE so that the mulfunctional PE would be recovered @@ -252,7 +252,7 @@ static int pseries_eeh_cap_start(struct pci_dn *pdn) if (!pdn) return 0; - rtas_read_config(pdn, PCI_STATUS, 2, &status); + rtas_pci_dn_read_config(pdn, PCI_STATUS, 2, &status); if (!(status & PCI_STATUS_CAP_LIST)) return 0; @@ -270,11 +270,11 @@ static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap) return 0; while (cnt--) { - rtas_read_config(pdn, pos, 1, &pos); + rtas_pci_dn_read_config(pdn, pos, 1, &pos); if (pos < 0x40) break; pos &= ~3; - rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id); + rtas_pci_dn_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id); if (id == 0xff) break; if (id == cap) @@ -294,7 +294,7 @@ static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap) if (!edev || !edev->pcie_cap) return 0; - if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) + if (rtas_pci_dn_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) return 0; else if (!header) return 0; @@ -307,7 +307,7 @@ static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap) if (pos < 256) break; - if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) + if (rtas_pci_dn_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) break; } @@ -412,8 +412,8 @@ static void pseries_eeh_init_edev(struct pci_dn *pdn) if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) { edev->mode |= EEH_DEV_BRIDGE; if (edev->pcie_cap) { - rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS, - 2, &pcie_flags); + rtas_pci_dn_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS, + 2, &pcie_flags); pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4; if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT) edev->mode |= EEH_DEV_ROOT_PORT; @@ -580,8 +580,10 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay) switch(rets[0]) { case 0: - result = EEH_STATE_MMIO_ACTIVE | - EEH_STATE_DMA_ACTIVE; + result = EEH_STATE_MMIO_ACTIVE | + EEH_STATE_DMA_ACTIVE | + EEH_STATE_MMIO_ENABLED | + EEH_STATE_DMA_ENABLED; break; case 1: result = EEH_STATE_RESET_ACTIVE | @@ -676,7 +678,7 @@ static int pseries_eeh_read_config(struct eeh_dev *edev, int where, int size, u3 { struct pci_dn *pdn = eeh_dev_to_pdn(edev); - return rtas_read_config(pdn, where, size, val); + return rtas_pci_dn_read_config(pdn, where, size, val); } /** @@ -692,14 +694,14 @@ static int pseries_eeh_write_config(struct eeh_dev *edev, int where, int size, u { struct pci_dn *pdn = eeh_dev_to_pdn(edev); - return rtas_write_config(pdn, where, size, val); + return rtas_pci_dn_write_config(pdn, where, size, val); } #ifdef CONFIG_PCI_IOV static int pseries_send_allow_unfreeze(struct pci_dn *pdn, u16 *vf_pe_array, int cur_vfs) { int rc; - int ibm_allow_unfreeze = 
rtas_token("ibm,open-sriov-allow-unfreeze"); + int ibm_allow_unfreeze = rtas_function_token(RTAS_FN_IBM_OPEN_SRIOV_ALLOW_UNFREEZE); unsigned long buid, addr; addr = rtas_config_addr(pdn->busno, pdn->devfn, 0); @@ -774,7 +776,7 @@ static int pseries_notify_resume(struct eeh_dev *edev) if (!edev) return -EEXIST; - if (rtas_token("ibm,open-sriov-allow-unfreeze") == RTAS_UNKNOWN_SERVICE) + if (rtas_function_token(RTAS_FN_IBM_OPEN_SRIOV_ALLOW_UNFREEZE) == RTAS_UNKNOWN_SERVICE) return -EINVAL; if (edev->pdev->is_physfn || edev->pdev->is_virtfn) @@ -784,6 +786,43 @@ static int pseries_notify_resume(struct eeh_dev *edev) } #endif +/** + * pseries_eeh_err_inject - Inject specified error to the indicated PE + * @pe: the indicated PE + * @type: error type + * @func: specific error type + * @addr: address + * @mask: address mask + * The routine is called to inject specified error, which is + * determined by @type and @func, to the indicated PE + */ +static int pseries_eeh_err_inject(struct eeh_pe *pe, int type, int func, + unsigned long addr, unsigned long mask) +{ + struct eeh_dev *pdev; + + /* Check on PCI error type */ + if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64) + return -EINVAL; + + switch (func) { + case EEH_ERR_FUNC_LD_MEM_ADDR: + case EEH_ERR_FUNC_LD_MEM_DATA: + case EEH_ERR_FUNC_ST_MEM_ADDR: + case EEH_ERR_FUNC_ST_MEM_DATA: + /* injects a MMIO error for all pdev's belonging to PE */ + pci_lock_rescan_remove(); + list_for_each_entry(pdev, &pe->edevs, entry) + eeh_pe_inject_mmio_error(pdev->pdev); + pci_unlock_rescan_remove(); + break; + default: + return -ERANGE; + } + + return 0; +} + static struct eeh_ops pseries_eeh_ops = { .name = "pseries", .probe = pseries_eeh_probe, @@ -792,7 +831,7 @@ static struct eeh_ops pseries_eeh_ops = { .reset = pseries_eeh_reset, .get_log = pseries_eeh_get_log, .configure_bridge = pseries_eeh_configure_bridge, - .err_inject = NULL, + .err_inject = pseries_eeh_err_inject, .read_config = pseries_eeh_read_config, .write_config = pseries_eeh_write_config, .next_error = NULL, @@ -815,14 +854,14 @@ static int __init eeh_pseries_init(void) int ret, config_addr; /* figure out EEH RTAS function call tokens */ - ibm_set_eeh_option = rtas_token("ibm,set-eeh-option"); - ibm_set_slot_reset = rtas_token("ibm,set-slot-reset"); - ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2"); - ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state"); - ibm_slot_error_detail = rtas_token("ibm,slot-error-detail"); - ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); - ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); - ibm_configure_pe = rtas_token("ibm,configure-pe"); + ibm_set_eeh_option = rtas_function_token(RTAS_FN_IBM_SET_EEH_OPTION); + ibm_set_slot_reset = rtas_function_token(RTAS_FN_IBM_SET_SLOT_RESET); + ibm_read_slot_reset_state2 = rtas_function_token(RTAS_FN_IBM_READ_SLOT_RESET_STATE2); + ibm_read_slot_reset_state = rtas_function_token(RTAS_FN_IBM_READ_SLOT_RESET_STATE); + ibm_slot_error_detail = rtas_function_token(RTAS_FN_IBM_SLOT_ERROR_DETAIL); + ibm_get_config_addr_info2 = rtas_function_token(RTAS_FN_IBM_GET_CONFIG_ADDR_INFO2); + ibm_get_config_addr_info = rtas_function_token(RTAS_FN_IBM_GET_CONFIG_ADDR_INFO); + ibm_configure_pe = rtas_function_token(RTAS_FN_IBM_CONFIGURE_PE); /* * ibm,configure-pe and ibm,configure-bridge have the same semantics, @@ -830,7 +869,7 @@ static int __init eeh_pseries_init(void) * ibm,configure-pe then fall back to using ibm,configure-bridge. 
*/ if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE) - ibm_configure_pe = rtas_token("ibm,configure-bridge"); + ibm_configure_pe = rtas_function_token(RTAS_FN_IBM_CONFIGURE_BRIDGE); /* * Necessary sanity check. We needn't check "get-config-addr-info" @@ -848,16 +887,7 @@ static int __init eeh_pseries_init(void) } /* Initialize error log size */ - eeh_error_buf_size = rtas_token("rtas-error-log-max"); - if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { - pr_info("%s: unknown EEH error log size\n", - __func__); - eeh_error_buf_size = 1024; - } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { - pr_info("%s: EEH error log size %d exceeds the maximal %d\n", - __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX); - eeh_error_buf_size = RTAS_ERROR_LOG_MAX; - } + eeh_error_buf_size = rtas_get_error_log_max(); /* Set EEH probe mode */ eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG); diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c index 09c119b2f623..18447e5fa17d 100644 --- a/arch/powerpc/platforms/pseries/firmware.c +++ b/arch/powerpc/platforms/pseries/firmware.c @@ -67,6 +67,8 @@ hypertas_fw_features_table[] = { {FW_FEATURE_PAPR_SCM, "hcall-scm"}, {FW_FEATURE_RPT_INVALIDATE, "hcall-rpt-invalidate"}, {FW_FEATURE_ENERGY_SCALE_INFO, "hcall-energy-scale-info"}, + {FW_FEATURE_WATCHDOG, "hcall-watchdog"}, + {FW_FEATURE_PLPKS, "hcall-pks"}, }; /* Build up the firmware features bitmask using the contents of diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 0f8cd8b06432..bc6926dbf148 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -33,6 +33,7 @@ #include <asm/xive.h> #include <asm/plpar_wrappers.h> #include <asm/topology.h> +#include <asm/systemcfg.h> #include "pseries.h" @@ -70,6 +71,7 @@ static void pseries_cpu_offline_self(void) xics_teardown_cpu(); unregister_slb_shadow(hwcpu); + unregister_vpa(hwcpu); rtas_stop_self(); /* Should never get here... 
*/ @@ -82,7 +84,9 @@ static int pseries_cpu_disable(void) int cpu = smp_processor_id(); set_cpu_online(cpu, false); - vdso_data->processorCount--; +#ifdef CONFIG_PPC64_PROC_SYSTEMCFG + systemcfg->processorCount--; +#endif /*fix boot_cpuid here*/ if (cpu == boot_cpuid) @@ -397,6 +401,14 @@ static int dlpar_online_cpu(struct device_node *dn) for_each_present_cpu(cpu) { if (get_hard_smp_processor_id(cpu) != thread) continue; + + if (!topology_is_primary_thread(cpu)) { + if (cpu_smt_control != CPU_SMT_ENABLED) + break; + if (!topology_smt_thread_allowed(cpu)) + break; + } + cpu_maps_update_done(); find_and_update_cpu_nid(cpu); rc = device_online(get_cpu_device(cpu)); @@ -492,7 +504,7 @@ static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index) bool found = false; int rc, index; - if (of_find_property(parent, "ibm,drc-info", NULL)) + if (of_property_present(parent, "ibm,drc-info")) return drc_info_valid_index(parent, drc_index); /* Note that the format of the ibm,drc-indexes array is @@ -619,17 +631,21 @@ static ssize_t dlpar_cpu_add(u32 drc_index) static unsigned int pseries_cpuhp_cache_use_count(const struct device_node *cachedn) { unsigned int use_count = 0; - struct device_node *dn; + struct device_node *dn, *tn; WARN_ON(!of_node_is_type(cachedn, "cache")); for_each_of_cpu_node(dn) { - if (of_find_next_cache_node(dn) == cachedn) + tn = of_find_next_cache_node(dn); + of_node_put(tn); + if (tn == cachedn) use_count++; } for_each_node_by_type(dn, "cache") { - if (of_find_next_cache_node(dn) == cachedn) + tn = of_find_next_cache_node(dn); + of_node_put(tn); + if (tn == cachedn) use_count++; } @@ -649,10 +665,13 @@ static int pseries_cpuhp_detach_nodes(struct device_node *cpudn) dn = cpudn; while ((dn = of_find_next_cache_node(dn))) { - if (pseries_cpuhp_cache_use_count(dn) > 1) + if (pseries_cpuhp_cache_use_count(dn) > 1) { + of_node_put(dn); break; + } ret = of_changeset_detach_node(&cs, dn); + of_node_put(dn); if (ret) goto out; } @@ -741,7 +760,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) u32 drc_index; int rc; - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); lock_device_hotplug(); @@ -837,29 +856,33 @@ static struct notifier_block pseries_smp_nb = { .notifier_call = pseries_smp_notifier, }; -static int __init pseries_cpu_hotplug_init(void) +void __init pseries_cpu_hotplug_init(void) { int qcss_tok; - unsigned int node; - -#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE - ppc_md.cpu_probe = dlpar_cpu_probe; - ppc_md.cpu_release = dlpar_cpu_release; -#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ - rtas_stop_self_token = rtas_token("stop-self"); - qcss_tok = rtas_token("query-cpu-stopped-state"); + rtas_stop_self_token = rtas_function_token(RTAS_FN_STOP_SELF); + qcss_tok = rtas_function_token(RTAS_FN_QUERY_CPU_STOPPED_STATE); if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE || qcss_tok == RTAS_UNKNOWN_SERVICE) { printk(KERN_INFO "CPU Hotplug not supported by firmware " "- disabling.\n"); - return 0; + return; } smp_ops->cpu_offline_self = pseries_cpu_offline_self; smp_ops->cpu_disable = pseries_cpu_disable; smp_ops->cpu_die = pseries_cpu_die; +} + +static int __init pseries_dlpar_init(void) +{ + unsigned int node; + +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE + ppc_md.cpu_probe = dlpar_cpu_probe; + ppc_md.cpu_release = dlpar_cpu_release; +#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ /* Processors can be added/removed only on LPAR */ if (firmware_has_feature(FW_FEATURE_LPAR)) { @@ -878,4 +901,4 @@ static int __init 
pseries_cpu_hotplug_init(void) return 0; } -machine_arch_initcall(pseries, pseries_cpu_hotplug_init); +machine_arch_initcall(pseries, pseries_dlpar_init); diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 2e3a317722a8..38dc4f7c9296 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -21,54 +21,6 @@ #include <asm/drmem.h> #include "pseries.h" -unsigned long pseries_memory_block_size(void) -{ - struct device_node *np; - u64 memblock_size = MIN_MEMORY_BLOCK_SIZE; - struct resource r; - - np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); - if (np) { - int len; - int size_cells; - const __be32 *prop; - - size_cells = of_n_size_cells(np); - - prop = of_get_property(np, "ibm,lmb-size", &len); - if (prop && len >= size_cells * sizeof(__be32)) - memblock_size = of_read_number(prop, size_cells); - of_node_put(np); - - } else if (machine_is(pseries)) { - /* This fallback really only applies to pseries */ - unsigned int memzero_size = 0; - - np = of_find_node_by_path("/memory@0"); - if (np) { - if (!of_address_to_resource(np, 0, &r)) - memzero_size = resource_size(&r); - of_node_put(np); - } - - if (memzero_size) { - /* We now know the size of memory@0, use this to find - * the first memoryblock and get its size. - */ - char buf[64]; - - sprintf(buf, "/memory@%x", memzero_size); - np = of_find_node_by_path(buf); - if (np) { - if (!of_address_to_resource(np, 0, &r)) - memblock_size = resource_size(&r); - of_node_put(np); - } - } - } - return memblock_size; -} - static void dlpar_free_property(struct property *prop) { kfree(prop->name); @@ -103,7 +55,8 @@ static bool find_aa_index(struct device_node *dr_node, struct property *ala_prop, const u32 *lmb_assoc, u32 *aa_index) { - u32 *assoc_arrays, new_prop_size; + __be32 *assoc_arrays; + u32 new_prop_size; struct property *new_prop; int aa_arrays, aa_array_entries, aa_array_sz; int i, index; @@ -255,8 +208,10 @@ static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online) int rc; mem_block = lmb_to_memblock(lmb); - if (!mem_block) + if (!mem_block) { + pr_err("Failed memory block lookup for LMB 0x%x\n", lmb->drc_index); return -EINVAL; + } if (online && mem_block->dev.offline) rc = device_online(&mem_block->dev); @@ -283,7 +238,7 @@ static int dlpar_offline_lmb(struct drmem_lmb *lmb) static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size) { - unsigned long block_sz, start_pfn; + unsigned long start_pfn; int sections_per_block; int i; @@ -294,8 +249,7 @@ static int pseries_remove_memblock(unsigned long base, unsigned long memblock_si if (!pfn_valid(start_pfn)) goto out; - block_sz = pseries_memory_block_size(); - sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE; + sections_per_block = memory_block_size / MIN_MEMORY_BLOCK_SIZE; for (i = 0; i < sections_per_block; i++) { __remove_memory(base, MIN_MEMORY_BLOCK_SIZE); @@ -311,11 +265,8 @@ out: static int pseries_remove_mem_node(struct device_node *np) { - const __be32 *prop; - unsigned long base; - unsigned long lmb_size; - int ret = -EINVAL; - int addr_cells, size_cells; + int ret; + struct resource res; /* * Check to see if we are actually removing memory @@ -326,21 +277,11 @@ static int pseries_remove_mem_node(struct device_node *np) /* * Find the base address and size of the memblock */ - prop = of_get_property(np, "reg", NULL); - if (!prop) + ret = of_address_to_resource(np, 0, &res); + if (ret) return ret; - 
addr_cells = of_n_addr_cells(np); - size_cells = of_n_size_cells(np); - - /* - * "reg" property represents (addr,size) tuple. - */ - base = of_read_number(prop, addr_cells); - prop += addr_cells; - lmb_size = of_read_number(prop, size_cells); - - pseries_remove_memblock(base, lmb_size); + pseries_remove_memblock(res.start, resource_size(&res)); return 0; } @@ -367,7 +308,6 @@ static int dlpar_add_lmb(struct drmem_lmb *); static int dlpar_remove_lmb(struct drmem_lmb *lmb) { struct memory_block *mem_block; - unsigned long block_sz; int rc; if (!lmb_is_removable(lmb)) @@ -383,13 +323,11 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb) return rc; } - block_sz = pseries_memory_block_size(); - - __remove_memory(lmb->base_addr, block_sz); + __remove_memory(lmb->base_addr, memory_block_size); put_device(&mem_block->dev); /* Update memory regions for memory remove */ - memblock_remove(lmb->base_addr, block_sz); + memblock_remove(lmb->base_addr, memory_block_size); invalidate_lmb_associativity_index(lmb); lmb->flags &= ~DRCONF_MEM_ASSIGNED; @@ -500,14 +438,15 @@ static int dlpar_memory_remove_by_index(u32 drc_index) } } - if (!lmb_found) + if (!lmb_found) { + pr_debug("Failed to look up LMB for drc index %x\n", drc_index); rc = -EINVAL; - - if (rc) + } else if (rc) { pr_debug("Failed to hot-remove memory at %llx\n", lmb->base_addr); - else + } else { pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr); + } return rc; } @@ -639,6 +578,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb) rc = update_lmb_associativity_index(lmb); if (rc) { dlpar_release_drc(lmb->drc_index); + pr_err("Failed to configure LMB 0x%x\n", lmb->drc_index); return rc; } @@ -650,14 +590,16 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb) nid = first_online_node; /* Add the memory */ - rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_NONE); + rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_MEMMAP_ON_MEMORY); if (rc) { + pr_err("Failed to add LMB 0x%x to node %u", lmb->drc_index, nid); invalidate_lmb_associativity_index(lmb); return rc; } rc = dlpar_online_lmb(lmb); if (rc) { + pr_err("Failed to online LMB 0x%x on node %u\n", lmb->drc_index, nid); __remove_memory(lmb->base_addr, block_sz); invalidate_lmb_associativity_index(lmb); } else { @@ -875,16 +817,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) case PSERIES_HP_ELOG_ACTION_ADD: switch (hp_elog->id_type) { case PSERIES_HP_ELOG_ID_DRC_COUNT: - count = hp_elog->_drc_u.drc_count; + count = be32_to_cpu(hp_elog->_drc_u.drc_count); rc = dlpar_memory_add_by_count(count); break; case PSERIES_HP_ELOG_ID_DRC_INDEX: - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); rc = dlpar_memory_add_by_index(drc_index); break; case PSERIES_HP_ELOG_ID_DRC_IC: - count = hp_elog->_drc_u.ic.count; - drc_index = hp_elog->_drc_u.ic.index; + count = be32_to_cpu(hp_elog->_drc_u.ic.count); + drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index); rc = dlpar_memory_add_by_ic(count, drc_index); break; default: @@ -896,16 +838,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) case PSERIES_HP_ELOG_ACTION_REMOVE: switch (hp_elog->id_type) { case PSERIES_HP_ELOG_ID_DRC_COUNT: - count = hp_elog->_drc_u.drc_count; + count = be32_to_cpu(hp_elog->_drc_u.drc_count); rc = dlpar_memory_remove_by_count(count); break; case PSERIES_HP_ELOG_ID_DRC_INDEX: - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); rc = dlpar_memory_remove_by_index(drc_index); break; case PSERIES_HP_ELOG_ID_DRC_IC: - count 
= hp_elog->_drc_u.ic.count; - drc_index = hp_elog->_drc_u.ic.index; + count = be32_to_cpu(hp_elog->_drc_u.ic.count); + drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index); rc = dlpar_memory_remove_by_ic(count, drc_index); break; default: @@ -929,11 +871,8 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) static int pseries_add_mem_node(struct device_node *np) { - const __be32 *prop; - unsigned long base; - unsigned long lmb_size; - int ret = -EINVAL; - int addr_cells, size_cells; + int ret; + struct resource res; /* * Check to see if we are actually adding memory @@ -944,23 +883,14 @@ static int pseries_add_mem_node(struct device_node *np) /* * Find the base and size of the memblock */ - prop = of_get_property(np, "reg", NULL); - if (!prop) + ret = of_address_to_resource(np, 0, &res); + if (ret) return ret; - addr_cells = of_n_addr_cells(np); - size_cells = of_n_size_cells(np); - /* - * "reg" property represents (addr,size) tuple. - */ - base = of_read_number(prop, addr_cells); - prop += addr_cells; - lmb_size = of_read_number(prop, size_cells); - /* * Update memory region to represent the memory add */ - ret = memblock_add(base, lmb_size); + ret = memblock_add(res.start, resource_size(&res)); return (ret < 0) ? -EINVAL : 0; } diff --git a/arch/powerpc/platforms/pseries/htmdump.c b/arch/powerpc/platforms/pseries/htmdump.c new file mode 100644 index 000000000000..742ec52c9d4d --- /dev/null +++ b/arch/powerpc/platforms/pseries/htmdump.c @@ -0,0 +1,490 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) IBM Corporation, 2024 + */ + +#define pr_fmt(fmt) "htmdump: " fmt + +#include <linux/debugfs.h> +#include <linux/module.h> +#include <asm/io.h> +#include <asm/machdep.h> +#include <asm/plpar_wrappers.h> +#include <asm/kvm_guest.h> + +static void *htm_buf; +static void *htm_status_buf; +static void *htm_info_buf; +static void *htm_caps_buf; +static u32 nodeindex; +static u32 nodalchipindex; +static u32 coreindexonchip; +static u32 htmtype; +static u32 htmconfigure; +static u32 htmstart; +static u32 htmsetup; +static u64 htmflags; + +static struct dentry *htmdump_debugfs_dir; +#define HTM_ENABLE 1 +#define HTM_DISABLE 0 +#define HTM_NOWRAP 1 +#define HTM_WRAP 0 + +/* + * Check the return code for H_HTM hcall. + * Return non-zero value (1) if either H_PARTIAL or H_SUCCESS + * is returned. For other return codes: + * Return zero if H_NOT_AVAILABLE. + * Return -EBUSY if hcall return busy. + * Return -EINVAL if any parameter or operation is not valid. + * Return -EPERM if HTM Virtualization Engine Technology code + * is not applied. + * Return -EIO if the HTM state is not valid. + */ +static ssize_t htm_return_check(long rc) +{ + switch (rc) { + case H_SUCCESS: + /* H_PARTIAL for the case where all available data can't be + * returned due to buffer size constraint. + */ + case H_PARTIAL: + break; + /* H_NOT_AVAILABLE indicates reading from an offset outside the range, + * i.e. past end of file. 
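+ * Returning 0 here surfaces as a zero-length read, i.e. userspace sees EOF
+ * once the requested offset runs past the captured trace. The readers below
+ * follow the same convention (a sketch of the calling pattern, not new code):
+ *   ret = htm_return_check(rc);
+ *   if (ret <= 0)
+ *           return ret;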
+ */ + case H_NOT_AVAILABLE: + return 0; + case H_BUSY: + case H_LONG_BUSY_ORDER_1_MSEC: + case H_LONG_BUSY_ORDER_10_MSEC: + case H_LONG_BUSY_ORDER_100_MSEC: + case H_LONG_BUSY_ORDER_1_SEC: + case H_LONG_BUSY_ORDER_10_SEC: + case H_LONG_BUSY_ORDER_100_SEC: + return -EBUSY; + case H_PARAMETER: + case H_P2: + case H_P3: + case H_P4: + case H_P5: + case H_P6: + return -EINVAL; + case H_STATE: + return -EIO; + case H_AUTHORITY: + return -EPERM; + } + + /* + * Return 1 for H_SUCCESS/H_PARTIAL + */ + return 1; +} + +static ssize_t htmdump_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_buf = filp->private_data; + unsigned long page, read_size, available; + loff_t offset; + long rc, ret; + + page = ALIGN_DOWN(*ppos, PAGE_SIZE); + offset = (*ppos) % PAGE_SIZE; + + /* + * Invoke H_HTM call with: + * - operation as htm dump (H_HTM_OP_DUMP_DATA) + * - last three values are address, size and offset + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_DUMP_DATA, virt_to_phys(htm_buf), + PAGE_SIZE, page); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_DUMP_DATA, returning %ld\n", ret); + return ret; + } + + available = PAGE_SIZE; + read_size = min(count, available); + *ppos += read_size; + return simple_read_from_buffer(ubuf, count, &offset, htm_buf, available); +} + +static const struct file_operations htmdump_fops = { + .llseek = NULL, + .read = htmdump_read, + .open = simple_open, +}; + +static int htmconfigure_set(void *data, u64 val) +{ + long rc, ret; + unsigned long param1 = -1, param2 = -1; + + /* + * value as 1 : configure HTM. + * value as 0 : deconfigure HTM. Return -EINVAL for + * other values. + */ + if (val == HTM_ENABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm configure (H_HTM_OP_CONFIGURE) + * - If htmflags is set, param1 and param2 will be -1 + * which is an indicator to use default htm mode reg mask + * and htm mode reg value. + * - last three values are unused, hence set to zero + */ + if (!htmflags) { + param1 = 0; + param2 = 0; + } + + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_CONFIGURE, param1, param2, 0); + } else if (val == HTM_DISABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm deconfigure (H_HTM_OP_DECONFIGURE) + * - last three values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_DECONFIGURE, 0, 0, 0); + } else + return -EINVAL; + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed, returning %ld\n", ret); + return ret; + } + + /* Set htmconfigure if operation succeeds */ + htmconfigure = val; + + return 0; +} + +static int htmconfigure_get(void *data, u64 *val) +{ + *val = htmconfigure; + return 0; +} + +static int htmstart_set(void *data, u64 val) +{ + long rc, ret; + + /* + * value as 1: start HTM + * value as 0: stop HTM + * Return -EINVAL for other values. 
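+ * Illustrative usage from userspace (a sketch; paths assume debugfs is
+ * mounted at /sys/kernel/debug and the "powerpc" arch_debugfs_dir):
+ *   echo 1 > /sys/kernel/debug/powerpc/htmdump/htmconfigure
+ *   echo 1 > /sys/kernel/debug/powerpc/htmdump/htmstart
+ *   echo 0 > /sys/kernel/debug/powerpc/htmdump/htmstart
+ *   cat /sys/kernel/debug/powerpc/htmdump/trace > htm.bin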
+ */ + if (val == HTM_ENABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm start (H_HTM_OP_START) + * - last three values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_START, 0, 0, 0); + + } else if (val == HTM_DISABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm stop (H_HTM_OP_STOP) + * - last three values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_STOP, 0, 0, 0); + } else + return -EINVAL; + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed, returning %ld\n", ret); + return ret; + } + + /* Set htmstart if H_HTM_OP_START/H_HTM_OP_STOP operation succeeds */ + htmstart = val; + + return 0; +} + +static int htmstart_get(void *data, u64 *val) +{ + *val = htmstart; + return 0; +} + +static ssize_t htmstatus_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_status_buf = filp->private_data; + long rc, ret; + u64 *num_entries; + u64 to_copy; + int htmstatus_flag; + + /* + * Invoke H_HTM call with: + * - operation as htm status (H_HTM_OP_STATUS) + * - last three values as addr, size and offset + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_STATUS, virt_to_phys(htm_status_buf), + PAGE_SIZE, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_STATUS, returning %ld\n", ret); + return ret; + } + + /* + * HTM status buffer, start of buffer + 0x10 gives the + * number of HTM entries in the buffer. Each nest htm status + * entry is 0x6 bytes where each core htm status entry is + * 0x8 bytes. + * So total count to copy is: + * 32 bytes (for first 7 fields) + (number of HTM entries * entry size) + */ + num_entries = htm_status_buf + 0x10; + if (htmtype == 0x2) + htmstatus_flag = 0x8; + else + htmstatus_flag = 0x6; + to_copy = 32 + (be64_to_cpu(*num_entries) * htmstatus_flag); + return simple_read_from_buffer(ubuf, count, ppos, htm_status_buf, to_copy); +} + +static const struct file_operations htmstatus_fops = { + .llseek = NULL, + .read = htmstatus_read, + .open = simple_open, +}; + +static ssize_t htminfo_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_info_buf = filp->private_data; + long rc, ret; + u64 *num_entries; + u64 to_copy; + + /* + * Invoke H_HTM call with: + * - operation as htm status (H_HTM_OP_STATUS) + * - last three values as addr, size and offset + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_DUMP_SYSPROC_CONF, virt_to_phys(htm_info_buf), + PAGE_SIZE, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_DUMP_SYSPROC_CONF, returning %ld\n", ret); + return ret; + } + + /* + * HTM status buffer, start of buffer + 0x10 gives the + * number of HTM entries in the buffer. Each entry of processor + * is 16 bytes. 
+ * + * So total count to copy is: + * 32 bytes (for first 5 fields) + (number of HTM entries * entry size) + */ + num_entries = htm_info_buf + 0x10; + to_copy = 32 + (be64_to_cpu(*num_entries) * 16); + return simple_read_from_buffer(ubuf, count, ppos, htm_info_buf, to_copy); +} + +static ssize_t htmcaps_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_caps_buf = filp->private_data; + long rc, ret; + + /* + * Invoke H_HTM call with: + * - operation as htm capabilities (H_HTM_OP_CAPABILITIES) + * - last three values as addr, size (0x80 for Capabilities Output Buffer + * and zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_CAPABILITIES, virt_to_phys(htm_caps_buf), + 0x80, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_CAPABILITIES, returning %ld\n", ret); + return ret; + } + + return simple_read_from_buffer(ubuf, count, ppos, htm_caps_buf, 0x80); +} + +static const struct file_operations htminfo_fops = { + .llseek = NULL, + .read = htminfo_read, + .open = simple_open, +}; + +static const struct file_operations htmcaps_fops = { + .llseek = NULL, + .read = htmcaps_read, + .open = simple_open, +}; + +static int htmsetup_set(void *data, u64 val) +{ + long rc, ret; + + /* + * Input value: HTM buffer size in the power of 2 + * example: hex value 0x21 ( decimal: 33 ) is for + * 8GB + * Invoke H_HTM call with: + * - operation as htm start (H_HTM_OP_SETUP) + * - parameter 1 set to input value. + * - last two values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_SETUP, val, 0, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_SETUP, returning %ld\n", ret); + return ret; + } + + /* Set htmsetup if H_HTM_OP_SETUP operation succeeds */ + htmsetup = val; + + return 0; +} + +static int htmsetup_get(void *data, u64 *val) +{ + *val = htmsetup; + return 0; +} + +static int htmflags_set(void *data, u64 val) +{ + /* + * Input value: + * Currently supported flag value is to enable/disable + * HTM buffer wrap. wrap is used along with "configure" + * to prevent HTM buffer from wrapping. 
+ * Writing 1 will set noWrap while configuring HTM + */ + if (val == HTM_NOWRAP) + htmflags = H_HTM_FLAGS_NOWRAP; + else if (val == HTM_WRAP) + htmflags = 0; + else + return -EINVAL; + + return 0; +} + +static int htmflags_get(void *data, u64 *val) +{ + *val = htmflags; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(htmconfigure_fops, htmconfigure_get, htmconfigure_set, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(htmstart_fops, htmstart_get, htmstart_set, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(htmsetup_fops, htmsetup_get, htmsetup_set, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(htmflags_fops, htmflags_get, htmflags_set, "%llu\n"); + +static int htmdump_init_debugfs(void) +{ + htm_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_buf) { + pr_err("Failed to allocate htmdump buf\n"); + return -ENOMEM; + } + + htmdump_debugfs_dir = debugfs_create_dir("htmdump", + arch_debugfs_dir); + + debugfs_create_u32("nodeindex", 0600, + htmdump_debugfs_dir, &nodeindex); + debugfs_create_u32("nodalchipindex", 0600, + htmdump_debugfs_dir, &nodalchipindex); + debugfs_create_u32("coreindexonchip", 0600, + htmdump_debugfs_dir, &coreindexonchip); + debugfs_create_u32("htmtype", 0600, + htmdump_debugfs_dir, &htmtype); + debugfs_create_file("trace", 0400, htmdump_debugfs_dir, htm_buf, &htmdump_fops); + + /* + * Debugfs interface files to control HTM operations: + */ + debugfs_create_file("htmconfigure", 0600, htmdump_debugfs_dir, NULL, &htmconfigure_fops); + debugfs_create_file("htmstart", 0600, htmdump_debugfs_dir, NULL, &htmstart_fops); + debugfs_create_file("htmsetup", 0600, htmdump_debugfs_dir, NULL, &htmsetup_fops); + debugfs_create_file("htmflags", 0600, htmdump_debugfs_dir, NULL, &htmflags_fops); + + /* Debugfs interface file to present status of HTM */ + htm_status_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_status_buf) { + pr_err("Failed to allocate htmstatus buf\n"); + return -ENOMEM; + } + + /* Debugfs interface file to present System Processor Configuration */ + htm_info_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_info_buf) { + pr_err("Failed to allocate htm info buf\n"); + return -ENOMEM; + } + + /* Debugfs interface file to present HTM capabilities */ + htm_caps_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_caps_buf) { + pr_err("Failed to allocate htm caps buf\n"); + return -ENOMEM; + } + + debugfs_create_file("htmstatus", 0400, htmdump_debugfs_dir, htm_status_buf, &htmstatus_fops); + debugfs_create_file("htminfo", 0400, htmdump_debugfs_dir, htm_info_buf, &htminfo_fops); + debugfs_create_file("htmcaps", 0400, htmdump_debugfs_dir, htm_caps_buf, &htmcaps_fops); + + return 0; +} + +static int __init htmdump_init(void) +{ + /* Disable on kvm guest */ + if (is_kvm_guest()) { + pr_info("htmdump not supported inside KVM guest\n"); + return -EOPNOTSUPP; + } + + if (htmdump_init_debugfs()) + return -ENOMEM; + + return 0; +} + +static void __exit htmdump_exit(void) +{ + debugfs_remove_recursive(htmdump_debugfs_dir); + kfree(htm_buf); +} + +module_init(htmdump_init); +module_exit(htmdump_exit); +MODULE_DESCRIPTION("PHYP Hardware Trace Macro (HTM) data dumper"); +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index ab9fc6506861..2b0cac6fb61f 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -16,7 +16,7 @@ #ifdef CONFIG_TRACEPOINTS #ifndef CONFIG_JUMP_LABEL - .section ".toc","aw" + .data .globl hcall_tracepoint_refcount hcall_tracepoint_refcount: @@ -27,7 +27,9 @@ hcall_tracepoint_refcount: /* * 
precall must preserve all registers. use unused STK_PARAM() - * areas to save snapshots and opcode. + * areas to save snapshots and opcode. STK_PARAM() in the caller's + * frame will be available even on ELFv2 because these are all + * variadic functions. */ #define HCALL_INST_PRECALL(FIRST_REG) \ mflr r0; \ @@ -41,29 +43,29 @@ hcall_tracepoint_refcount: std r10,STK_PARAM(R10)(r1); \ std r0,16(r1); \ addi r4,r1,STK_PARAM(FIRST_REG); \ - stdu r1,-STACK_FRAME_OVERHEAD(r1); \ - bl __trace_hcall_entry; \ - ld r3,STACK_FRAME_OVERHEAD+STK_PARAM(R3)(r1); \ - ld r4,STACK_FRAME_OVERHEAD+STK_PARAM(R4)(r1); \ - ld r5,STACK_FRAME_OVERHEAD+STK_PARAM(R5)(r1); \ - ld r6,STACK_FRAME_OVERHEAD+STK_PARAM(R6)(r1); \ - ld r7,STACK_FRAME_OVERHEAD+STK_PARAM(R7)(r1); \ - ld r8,STACK_FRAME_OVERHEAD+STK_PARAM(R8)(r1); \ - ld r9,STACK_FRAME_OVERHEAD+STK_PARAM(R9)(r1); \ - ld r10,STACK_FRAME_OVERHEAD+STK_PARAM(R10)(r1) + stdu r1,-STACK_FRAME_MIN_SIZE(r1); \ + bl CFUNC(__trace_hcall_entry); \ + ld r3,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1); \ + ld r4,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1); \ + ld r5,STACK_FRAME_MIN_SIZE+STK_PARAM(R5)(r1); \ + ld r6,STACK_FRAME_MIN_SIZE+STK_PARAM(R6)(r1); \ + ld r7,STACK_FRAME_MIN_SIZE+STK_PARAM(R7)(r1); \ + ld r8,STACK_FRAME_MIN_SIZE+STK_PARAM(R8)(r1); \ + ld r9,STACK_FRAME_MIN_SIZE+STK_PARAM(R9)(r1); \ + ld r10,STACK_FRAME_MIN_SIZE+STK_PARAM(R10)(r1) /* * postcall is performed immediately before function return which * allows liberal use of volatile registers. */ #define __HCALL_INST_POSTCALL \ - ld r0,STACK_FRAME_OVERHEAD+STK_PARAM(R3)(r1); \ - std r3,STACK_FRAME_OVERHEAD+STK_PARAM(R3)(r1); \ + ld r0,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1); \ + std r3,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1); \ mr r4,r3; \ mr r3,r0; \ - bl __trace_hcall_exit; \ - ld r0,STACK_FRAME_OVERHEAD+16(r1); \ - addi r1,r1,STACK_FRAME_OVERHEAD; \ + bl CFUNC(__trace_hcall_exit); \ + ld r0,STACK_FRAME_MIN_SIZE+16(r1); \ + addi r1,r1,STACK_FRAME_MIN_SIZE; \ ld r3,STK_PARAM(R3)(r1); \ mtlr r0 @@ -88,8 +90,8 @@ hcall_tracepoint_refcount: BEGIN_FTR_SECTION; \ b 1f; \ END_FTR_SECTION(0, 1); \ - ld r12,hcall_tracepoint_refcount@toc(r2); \ - std r12,32(r1); \ + LOAD_REG_ADDR(r12, hcall_tracepoint_refcount) ; \ + ld r12,0(r12); \ cmpdi r12,0; \ bne- LABEL; \ 1: @@ -182,9 +184,6 @@ _GLOBAL_TOC(plpar_hcall) plpar_hcall_trace: HCALL_INST_PRECALL(R5) - std r4,STK_PARAM(R4)(r1) - mr r0,r4 - mr r4,r5 mr r5,r6 mr r6,r7 @@ -194,7 +193,7 @@ plpar_hcall_trace: HVSC - ld r12,STK_PARAM(R4)(r1) + ld r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1) std r4,0(r12) std r5,8(r12) std r6,16(r12) @@ -294,23 +293,20 @@ _GLOBAL_TOC(plpar_hcall9) plpar_hcall9_trace: HCALL_INST_PRECALL(R5) - std r4,STK_PARAM(R4)(r1) - mr r0,r4 - mr r4,r5 mr r5,r6 mr r6,r7 mr r7,r8 mr r8,r9 mr r9,r10 - ld r10,STACK_FRAME_OVERHEAD+STK_PARAM(R11)(r1) - ld r11,STACK_FRAME_OVERHEAD+STK_PARAM(R12)(r1) - ld r12,STACK_FRAME_OVERHEAD+STK_PARAM(R13)(r1) + ld r10,STACK_FRAME_MIN_SIZE+STK_PARAM(R11)(r1) + ld r11,STACK_FRAME_MIN_SIZE+STK_PARAM(R12)(r1) + ld r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R13)(r1) HVSC mr r0,r12 - ld r12,STACK_FRAME_OVERHEAD+STK_PARAM(R4)(r1) + ld r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1) std r4,0(r12) std r5,8(r12) std r6,16(r12) diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c index 1ac52963e08b..8803c947998e 100644 --- a/arch/powerpc/platforms/pseries/hvconsole.c +++ b/arch/powerpc/platforms/pseries/hvconsole.c @@ -25,7 +25,7 @@ * firmware. * @count: not used? 
*/ -int hvc_get_chars(uint32_t vtermno, char *buf, int count) +ssize_t hvc_get_chars(uint32_t vtermno, u8 *buf, size_t count) { long ret; unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; @@ -52,7 +52,7 @@ EXPORT_SYMBOL(hvc_get_chars); * firmware. Must be at least 16 bytes, even if count is less than 16. * @count: Send this number of characters. */ -int hvc_put_chars(uint32_t vtermno, const char *buf, int count) +ssize_t hvc_put_chars(uint32_t vtermno, const u8 *buf, size_t count) { unsigned long *lbuf = (unsigned long *) buf; long ret; diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c index 96e18d3b2fcf..d48c9c7ce10f 100644 --- a/arch/powerpc/platforms/pseries/hvcserver.c +++ b/arch/powerpc/platforms/pseries/hvcserver.c @@ -176,7 +176,7 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head, = (unsigned int)last_p_partition_ID; /* copy the Null-term char too */ - strlcpy(&next_partner_info->location_code[0], + strscpy(&next_partner_info->location_code[0], (char *)&pi_buff[2], sizeof(next_partner_info->location_code)); diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index 7ee3ed7d6cc2..3436b0af795e 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -47,6 +47,7 @@ #include <linux/slab.h> #include <linux/stat.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <asm/ibmebus.h> #include <asm/machdep.h> @@ -54,7 +55,7 @@ static struct device ibmebus_bus_device = { /* fake "parent" device */ .init_name = "ibmebus", }; -struct bus_type ibmebus_bus_type; +const struct bus_type ibmebus_bus_type; /* These devices will automatically be added to the bus during init */ static const struct of_device_id ibmebus_matches[] __initconst = { @@ -152,7 +153,11 @@ static const struct dma_map_ops ibmebus_dma_ops = { static int ibmebus_match_path(struct device *dev, const void *data) { struct device_node *dn = to_platform_device(dev)->dev.of_node; - return (of_find_node_by_path(data) == dn); + struct device_node *tn = of_find_node_by_path(data); + + of_node_put(tn); + + return (tn == dn); } static int ibmebus_match_node(struct device *dev, const void *data) @@ -263,7 +268,7 @@ static char *ibmebus_chomp(const char *in, size_t count) return out; } -static ssize_t probe_store(struct bus_type *bus, const char *buf, size_t count) +static ssize_t probe_store(const struct bus_type *bus, const char *buf, size_t count) { struct device_node *dn = NULL; struct device *dev; @@ -301,7 +306,7 @@ out: } static BUS_ATTR_WO(probe); -static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count) +static ssize_t remove_store(const struct bus_type *bus, const char *buf, size_t count) { struct device *dev; char *path; @@ -334,7 +339,7 @@ static struct attribute *ibmbus_bus_attrs[] = { }; ATTRIBUTE_GROUPS(ibmbus_bus); -static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) +static int ibmebus_bus_bus_match(struct device *dev, const struct device_driver *drv) { const struct of_device_id *matches = drv->of_match_table; @@ -422,9 +427,14 @@ static struct attribute *ibmebus_bus_device_attrs[] = { }; ATTRIBUTE_GROUPS(ibmebus_bus_device); -struct bus_type ibmebus_bus_type = { +static int ibmebus_bus_modalias(const struct device *dev, struct kobj_uevent_env *env) +{ + return of_device_uevent_modalias(dev, env); +} + +const struct bus_type ibmebus_bus_type = { .name = "ibmebus", - .uevent = 
of_device_uevent_modalias, + .uevent = ibmebus_bus_modalias, .bus_groups = ibmbus_bus_groups, .match = ibmebus_bus_bus_match, .probe = ibmebus_bus_device_probe, @@ -451,6 +461,7 @@ static int __init ibmebus_bus_init(void) if (err) { printk(KERN_WARNING "%s: device_register returned %i\n", __func__, err); + put_device(&ibmebus_bus_device); bus_unregister(&ibmebus_bus_type); return err; diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c index 7b74d4d34e9a..f411d4fe7b24 100644 --- a/arch/powerpc/platforms/pseries/io_event_irq.c +++ b/arch/powerpc/platforms/pseries/io_event_irq.c @@ -143,7 +143,7 @@ static int __init ioei_init(void) { struct device_node *np; - ioei_check_exception_token = rtas_token("check-exception"); + ioei_check_exception_token = rtas_function_token(RTAS_FN_CHECK_EXCEPTION); if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) return -ENODEV; diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index fba64304e859..eec333dd2e59 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -21,7 +21,9 @@ #include <linux/dma-mapping.h> #include <linux/crash_dump.h> #include <linux/memory.h> +#include <linux/vmalloc.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/iommu.h> #include <linux/rculist.h> #include <asm/io.h> @@ -50,7 +52,8 @@ enum { enum { DDW_EXT_SIZE = 0, DDW_EXT_RESET_DMA_WIN = 1, - DDW_EXT_QUERY_OUT_SIZE = 2 + DDW_EXT_QUERY_OUT_SIZE = 2, + DDW_EXT_LIMITED_ADDR_MODE = 3 }; static struct iommu_table *iommu_pseries_alloc_table(int node) @@ -66,6 +69,10 @@ static struct iommu_table *iommu_pseries_alloc_table(int node) return tbl; } +#ifdef CONFIG_IOMMU_API +static struct iommu_table_group_ops spapr_tce_table_group_ops; +#endif + static struct iommu_table_group *iommu_pseries_alloc_group(int node) { struct iommu_table_group *table_group; @@ -74,6 +81,11 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node) if (!table_group) return NULL; +#ifdef CONFIG_IOMMU_API + table_group->ops = &spapr_tce_table_group_ops; + table_group->pgsizes = SZ_4K; +#endif + table_group->tables[0] = iommu_pseries_alloc_table(node); if (table_group->tables[0]) return table_group; @@ -85,19 +97,24 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node) static void iommu_pseries_free_group(struct iommu_table_group *table_group, const char *node_name) { - struct iommu_table *tbl; - if (!table_group) return; - tbl = table_group->tables[0]; #ifdef CONFIG_IOMMU_API if (table_group->group) { iommu_group_put(table_group->group); BUG_ON(table_group->group); } #endif - iommu_tce_table_put(tbl); + + /* Default DMA window table is at index 0, while DDW at 1. SR-IOV + * adapters only have table on index 0(if not direct mapped). 
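+ * When a dynamic DMA window is created, enable_ddw() places it at index 1
+ * (or at index 0 if the default window was removed), so both slots are
+ * released here to cover either layout. (Descriptive note, not new code.)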
+ */ + if (table_group->tables[0]) + iommu_tce_table_put(table_group->tables[0]); + + if (table_group->tables[1]) + iommu_tce_table_put(table_group->tables[1]); kfree(table_group); } @@ -132,7 +149,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, } -static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) +static void tce_clear_pSeries(struct iommu_table *tbl, long index, long npages) { __be64 *tcep; @@ -151,6 +168,39 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) return be64_to_cpu(*tcep); } +#ifdef CONFIG_IOMMU_API +static long pseries_tce_iommu_userspace_view_alloc(struct iommu_table *tbl) +{ + unsigned long cb = ALIGN(sizeof(tbl->it_userspace[0]) * tbl->it_size, PAGE_SIZE); + unsigned long *uas; + + if (tbl->it_indirect_levels) /* Impossible */ + return -EPERM; + + WARN_ON(tbl->it_userspace); + + uas = vzalloc(cb); + if (!uas) + return -ENOMEM; + + tbl->it_userspace = (__be64 *) uas; + + return 0; +} +#endif + +static void tce_iommu_userspace_view_free(struct iommu_table *tbl) +{ + vfree(tbl->it_userspace); + tbl->it_userspace = NULL; +} + +static void tce_free_pSeries(struct iommu_table *tbl) +{ + if (tbl->it_userspace) + tce_iommu_userspace_view_free(tbl); +} + static void tce_free_pSeriesLP(unsigned long liobn, long, long, long); static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long); @@ -248,7 +298,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, * Set up the page with TCE data, looping through and setting * the values. */ - limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE); + limit = min_t(long, npages, 4096 / TCE_ENTRY_SIZE); for (l = 0; l < limit; l++) { tcep[l] = cpu_to_be64(proto_tce | rpn << tceshift); @@ -306,13 +356,22 @@ static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift, static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) { u64 rc; + long rpages = npages; + unsigned long limit; if (!firmware_has_feature(FW_FEATURE_STUFF_TCE)) return tce_free_pSeriesLP(tbl->it_index, tcenum, tbl->it_page_shift, npages); - rc = plpar_tce_stuff((u64)tbl->it_index, - (u64)tcenum << tbl->it_page_shift, 0, npages); + do { + limit = min_t(unsigned long, rpages, 512); + + rc = plpar_tce_stuff((u64)tbl->it_index, + (u64)tcenum << tbl->it_page_shift, 0, limit); + + rpages -= limit; + tcenum += limit; + } while (rpages > 0 && !rc); if (rc && printk_ratelimit()) { printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n"); @@ -352,6 +411,7 @@ struct dynamic_dma_window_prop { struct dma_win { struct device_node *device; const struct dynamic_dma_window_prop *prop; + bool direct; struct list_head list; }; @@ -374,8 +434,6 @@ static LIST_HEAD(dma_win_list); static DEFINE_SPINLOCK(dma_win_list_lock); /* protects initializing window twice for same device */ static DEFINE_MUTEX(dma_win_init_mutex); -#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info" -#define DMA64_PROPNAME "linux,dma64-ddr-window-info" static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn, unsigned long num_pfn, const void *arg) @@ -474,7 +532,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, * Set up the page with TCE data, looping through and setting * the values. 
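 * A worked example of the batching arithmetic (assuming TCE_ENTRY_SIZE is
 * 8 bytes, per asm/tce.h): one 4K page of TCE data holds 4096 / 8 = 512
 * entries, so each H_PUT_TCE_INDIRECT call below programs at most 512 TCEs
 * before the loop refills the page.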
*/ - limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE); + limit = min_t(long, num_tce, 4096 / TCE_ENTRY_SIZE); dma_offset = next + be64_to_cpu(maprange->dma_base); for (l = 0; l < limit; l++) { @@ -555,32 +613,9 @@ static void iommu_table_setparms(struct pci_controller *phb, struct iommu_table_ops iommu_table_lpar_multi_ops; -/* - * iommu_table_setparms_lpar - * - * Function: On pSeries LPAR systems, return TCE table info, given a pci bus. - */ -static void iommu_table_setparms_lpar(struct pci_controller *phb, - struct device_node *dn, - struct iommu_table *tbl, - struct iommu_table_group *table_group, - const __be32 *dma_window) -{ - unsigned long offset, size, liobn; - - of_parse_dma_window(dn, dma_window, &liobn, &offset, &size); - - iommu_table_setparms_common(tbl, phb->bus->number, liobn, offset, size, IOMMU_PAGE_SHIFT_4K, NULL, - &iommu_table_lpar_multi_ops); - - - table_group->tce32_start = offset; - table_group->tce32_size = size; -} - struct iommu_table_ops iommu_table_pseries_ops = { .set = tce_build_pSeries, - .clear = tce_free_pSeries, + .clear = tce_clear_pSeries, .get = tce_get_pseries }; @@ -689,40 +724,143 @@ static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned return rc; } + +static __be64 *tce_useraddr_pSeriesLP(struct iommu_table *tbl, long index, + bool __always_unused alloc) +{ + return tbl->it_userspace ? &tbl->it_userspace[index - tbl->it_offset] : NULL; +} #endif struct iommu_table_ops iommu_table_lpar_multi_ops = { .set = tce_buildmulti_pSeriesLP, #ifdef CONFIG_IOMMU_API .xchg_no_kill = tce_exchange_pseries, + .useraddrptr = tce_useraddr_pSeriesLP, #endif .clear = tce_freemulti_pSeriesLP, - .get = tce_get_pSeriesLP + .get = tce_get_pSeriesLP, + .free = tce_free_pSeries }; +#ifdef CONFIG_IOMMU_API +/* + * When the DMA window properties might have been removed, + * the parent node has the table_group setup on it. + */ +static struct device_node *pci_dma_find_parent_node(struct pci_dev *dev, + struct iommu_table_group *table_group) +{ + struct device_node *dn = pci_device_to_OF_node(dev); + struct pci_dn *rpdn; + + for (; dn && PCI_DN(dn); dn = dn->parent) { + rpdn = PCI_DN(dn); + + if (table_group == rpdn->table_group) + return dn; + } + + return NULL; +} +#endif + +/* + * Find nearest ibm,dma-window (default DMA window) or direct DMA window or + * dynamic 64bit DMA window, walking up the device tree. + */ +static struct device_node *pci_dma_find(struct device_node *dn, + struct dynamic_dma_window_prop *prop) +{ + const __be32 *default_prop = NULL; + const __be32 *ddw_prop = NULL; + struct device_node *rdn = NULL; + bool default_win = false, ddw_win = false; + + for ( ; dn && PCI_DN(dn); dn = dn->parent) { + default_prop = of_get_property(dn, "ibm,dma-window", NULL); + if (default_prop) { + rdn = dn; + default_win = true; + } + ddw_prop = of_get_property(dn, DIRECT64_PROPNAME, NULL); + if (ddw_prop) { + rdn = dn; + ddw_win = true; + break; + } + ddw_prop = of_get_property(dn, DMA64_PROPNAME, NULL); + if (ddw_prop) { + rdn = dn; + ddw_win = true; + break; + } + + /* At least found default window, which is the case for normal boot */ + if (default_win) + break; + } + + /* For PCI devices there will always be a DMA window, either on the device + * or parent bus + */ + WARN_ON(!(default_win | ddw_win)); + + /* caller doesn't want to get DMA window property */ + if (!prop) + return rdn; + + /* parse DMA window property. During normal system boot, only default + * DMA window is passed in OF. 
But, for kdump, a dedicated adapter might + * have both default and DDW in FDT. In this scenario, DDW takes precedence + * over default window. + */ + if (ddw_win) { + struct dynamic_dma_window_prop *p; + + p = (struct dynamic_dma_window_prop *)ddw_prop; + prop->liobn = p->liobn; + prop->dma_base = p->dma_base; + prop->tce_shift = p->tce_shift; + prop->window_shift = p->window_shift; + } else if (default_win) { + unsigned long offset, size, liobn; + + of_parse_dma_window(rdn, default_prop, &liobn, &offset, &size); + + prop->liobn = cpu_to_be32((u32)liobn); + prop->dma_base = cpu_to_be64(offset); + prop->tce_shift = cpu_to_be32(IOMMU_PAGE_SHIFT_4K); + prop->window_shift = cpu_to_be32(order_base_2(size)); + } + + return rdn; +} + static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) { struct iommu_table *tbl; struct device_node *dn, *pdn; struct pci_dn *ppci; - const __be32 *dma_window = NULL; + struct dynamic_dma_window_prop prop; dn = pci_bus_to_OF_node(bus); pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n", dn); - /* - * Find nearest ibm,dma-window (default DMA window), walking up the - * device tree + pdn = pci_dma_find(dn, &prop); + + /* In PPC architecture, there will always be DMA window on bus or one of the + * parent bus. During reboot, there will be ibm,dma-window property to + * define DMA window. For kdump, there will at least be default window or DDW + * or both. + * There is an exception to the above. In case the PE goes into frozen + * state, firmware may not provide ibm,dma-window property at the time + * of LPAR boot up. */ - for (pdn = dn; pdn != NULL; pdn = pdn->parent) { - dma_window = of_get_property(pdn, "ibm,dma-window", NULL); - if (dma_window != NULL) - break; - } - if (dma_window == NULL) { + if (!pdn) { pr_debug(" no ibm,dma-window property !\n"); return; } @@ -735,11 +873,17 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) if (!ppci->table_group) { ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node); tbl = ppci->table_group->tables[0]; - iommu_table_setparms_lpar(ppci->phb, pdn, tbl, - ppci->table_group, dma_window); + + iommu_table_setparms_common(tbl, ppci->phb->bus->number, + be32_to_cpu(prop.liobn), + be64_to_cpu(prop.dma_base), + 1ULL << be32_to_cpu(prop.window_shift), + be32_to_cpu(prop.tce_shift), NULL, + &iommu_table_lpar_multi_ops); if (!iommu_init_table(tbl, ppci->phb->node, 0, 0)) panic("Failed to initialize iommu table"); + iommu_register_group(ppci->table_group, pci_domain_nr(bus), 0); pr_debug(" created table: %p\n", ppci->table_group); @@ -835,7 +979,7 @@ static void __remove_dma_window(struct device_node *np, u32 *ddw_avail, u64 liob } static void remove_dma_window(struct device_node *np, u32 *ddw_avail, - struct property *win) + struct property *win, bool cleanup) { struct dynamic_dma_window_prop *dwp; u64 liobn; @@ -843,11 +987,44 @@ static void remove_dma_window(struct device_node *np, u32 *ddw_avail, dwp = win->value; liobn = (u64)be32_to_cpu(dwp->liobn); - clean_dma_window(np, dwp); + if (cleanup) + clean_dma_window(np, dwp); __remove_dma_window(np, ddw_avail, liobn); } -static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_name) +static void copy_property(struct device_node *pdn, const char *from, const char *to) +{ + struct property *src, *dst; + + src = of_find_property(pdn, from, NULL); + if (!src) + return; + + dst = kzalloc(sizeof(*dst), GFP_KERNEL); + if (!dst) + return; + + dst->name = kstrdup(to, GFP_KERNEL); + dst->value = kmemdup(src->value, src->length, 
GFP_KERNEL); + dst->length = src->length; + if (!dst->name || !dst->value) + return; + + if (of_add_property(pdn, dst)) { + pr_err("Unable to add DMA window property for %pOF", pdn); + goto free_prop; + } + + return; + +free_prop: + kfree(dst->name); + kfree(dst->value); + kfree(dst); +} + +static int remove_dma_window_named(struct device_node *np, bool remove_prop, const char *win_name, + bool cleanup) { struct property *win; u32 ddw_avail[DDW_APPLICABLE_SIZE]; @@ -862,13 +1039,20 @@ static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_ if (ret) return 0; - if (win->length >= sizeof(struct dynamic_dma_window_prop)) - remove_dma_window(np, ddw_avail, win); + remove_dma_window(np, ddw_avail, win, cleanup); if (!remove_prop) return 0; + /* Default window property if removed is lost as reset-pe doesn't restore it. + * Though FDT has a copy of it, the DLPAR hotplugged devices will not have a + * node on FDT until next reboot. So, back it up. + */ + if ((strcmp(win_name, "ibm,dma-window") == 0) && + !of_find_property(np, "ibm,dma-window-saved", NULL)) + copy_property(np, win_name, "ibm,dma-window-saved"); + ret = of_remove_property(np, win); if (ret) pr_warn("%pOF: failed to remove DMA window property: %d\n", @@ -876,7 +1060,8 @@ static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_ return 0; } -static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift) +static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift, + bool *direct_mapping) { struct dma_win *window; const struct dynamic_dma_window_prop *dma64; @@ -889,6 +1074,7 @@ static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *windo dma64 = window->prop; *dma_addr = be64_to_cpu(dma64->dma_base); *window_shift = be32_to_cpu(dma64->window_shift); + *direct_mapping = window->direct; found = true; break; } @@ -909,6 +1095,7 @@ static struct dma_win *ddw_list_new_entry(struct device_node *pdn, window->device = pdn; window->prop = dma64; + window->direct = false; return window; } @@ -923,10 +1110,16 @@ static void find_existing_ddw_windows_named(const char *name) for_each_node_with_property(pdn, name) { dma64 = of_get_property(pdn, name, &len); if (!dma64 || len < sizeof(*dma64)) { - remove_ddw(pdn, true, name); + remove_dma_window_named(pdn, true, name, true); continue; } + /* If at the time of system initialization, there are DDWs in OF, + * it means this is during kexec. DDW could be direct or dynamic. + * We will just mark DDWs as "dynamic" since this is kdump path, + * no need to worry about perforance. ddw_list_new_entry() will + * set window->direct = false. 
+ */ window = ddw_list_new_entry(pdn, dma64); if (!window) { of_node_put(pdn); @@ -1021,9 +1214,6 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, ret = rtas_call(ddw_avail[DDW_QUERY_PE_DMA_WIN], 3, out_sz, query_out, cfg_addr, BUID_HI(buid), BUID_LO(buid)); - dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x returned %d\n", - ddw_avail[DDW_QUERY_PE_DMA_WIN], cfg_addr, BUID_HI(buid), - BUID_LO(buid), ret); switch (out_sz) { case 5: @@ -1041,6 +1231,11 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, break; } + dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x returned %d, lb=%llx ps=%x wn=%d\n", + ddw_avail[DDW_QUERY_PE_DMA_WIN], cfg_addr, BUID_HI(buid), + BUID_LO(buid), ret, query->largest_available_block, + query->page_size, query->windows_available); + return ret; } @@ -1090,28 +1285,13 @@ static LIST_HEAD(failed_ddw_pdn_list); static phys_addr_t ddw_memory_hotplug_max(void) { - phys_addr_t max_addr = memory_hotplug_max(); - struct device_node *memory; - - for_each_node_by_type(memory, "memory") { - unsigned long start, size; - int n_mem_addr_cells, n_mem_size_cells, len; - const __be32 *memcell_buf; + resource_size_t max_addr; - memcell_buf = of_get_property(memory, "reg", &len); - if (!memcell_buf || len <= 0) - continue; - - n_mem_addr_cells = of_n_addr_cells(memory); - n_mem_size_cells = of_n_size_cells(memory); - - start = of_read_number(memcell_buf, n_mem_addr_cells); - memcell_buf += n_mem_addr_cells; - size = of_read_number(memcell_buf, n_mem_size_cells); - memcell_buf += n_mem_size_cells; - - max_addr = max_t(phys_addr_t, max_addr, start + size); - } +#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) + max_addr = hot_add_drconf_memory_max(); +#else + max_addr = memblock_end_of_DRAM(); +#endif return max_addr; } @@ -1148,6 +1328,54 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn) ret); } +/* + * Platforms support placing PHB in limited address mode starting with LoPAR + * level 2.13 implement. In this mode, the DMA address returned by DDW is over + * 4GB but, less than 64-bits. This benefits IO adapters that don't support + * 64-bits for DMA addresses. + */ +static int limited_dma_window(struct pci_dev *dev, struct device_node *par_dn) +{ + int ret; + u32 cfg_addr, reset_dma_win, las_supported; + u64 buid; + struct device_node *dn; + struct pci_dn *pdn; + + ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win); + if (ret) + goto out; + + ret = ddw_read_ext(par_dn, DDW_EXT_LIMITED_ADDR_MODE, &las_supported); + + /* Limited Address Space extension available on the platform but DDW in + * limited addressing mode not supported + */ + if (!ret && !las_supported) + ret = -EPROTO; + + if (ret) { + dev_info(&dev->dev, "Limited Address Space for DDW not Supported, err: %d", ret); + goto out; + } + + dn = pci_device_to_OF_node(dev); + pdn = PCI_DN(dn); + buid = pdn->phb->buid; + cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8); + + ret = rtas_call(reset_dma_win, 4, 1, NULL, cfg_addr, BUID_HI(buid), + BUID_LO(buid), 1); + if (ret) + dev_info(&dev->dev, + "ibm,reset-pe-dma-windows(%x) for Limited Addr Support: %x %x %x returned %d ", + reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid), + ret); + +out: + return ret; +} + /* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. 
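 * For example, a query value with only the 4K and 64K bits of the LoPAR
 * "IO Page Sizes" field set (0x3 in the encoding assumed here) would yield
 * a shift of 16, i.e. 64K IOMMU pages.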
*/ static int iommu_get_page_shift(u32 query_page_size) { @@ -1215,14 +1443,14 @@ static struct property *ddw_property_create(const char *propname, u32 liobn, u64 * * returns true if can map all pages (direct mapping), false otherwise.. */ -static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) +static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn, u64 dma_mask) { int len = 0, ret; int max_ram_len = order_base_2(ddw_memory_hotplug_max()); struct ddw_query_response query; struct ddw_create_response create; int page_shift; - u64 win_addr; + u64 win_addr, dynamic_offset = 0; const char *win_name; struct device_node *dn; u32 ddw_avail[DDW_APPLICABLE_SIZE]; @@ -1230,9 +1458,13 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) struct property *win64; struct failed_ddw_pdn *fpdn; bool default_win_removed = false, direct_mapping = false; + bool dynamic_mapping = false; bool pmem_present; struct pci_dn *pci = PCI_DN(pdn); - struct iommu_table *tbl = pci->table_group->tables[0]; + struct property *default_win = NULL; + bool limited_addr_req = false, limited_addr_enabled = false; + int dev_max_ddw; + int ddw_sz; dn = of_find_node_by_type(NULL, "ibm,pmemory"); pmem_present = dn != NULL; @@ -1240,10 +1472,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) mutex_lock(&dma_win_init_mutex); - if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) { - direct_mapping = (len >= max_ram_len); + if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len, &direct_mapping)) goto out_unlock; - } /* * If we already went through this for a previous function of @@ -1261,7 +1491,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) * the ibm,ddw-applicable property holds the tokens for: * ibm,query-pe-dma-window * ibm,create-pe-dma-window - * ibm,remove-pe-dma-window * for the given node in that order. * the property is actually in the parent, not the PE */ @@ -1281,6 +1510,20 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (ret != 0) goto out_failed; + /* DMA Limited Addressing required? This is when the driver has + * requested to create DDW but supports mask which is less than 64-bits + */ + limited_addr_req = (dma_mask != DMA_BIT_MASK(64)); + + /* place the PHB in Limited Addressing mode */ + if (limited_addr_req) { + if (limited_dma_window(dev, pdn)) + goto out_failed; + + /* PHB is in Limited address mode */ + limited_addr_enabled = true; + } + /* * If there is no window available, remove the default DMA window, * if it's present. This will make all the resources available to the @@ -1289,11 +1532,10 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) * for extensions presence. 
*/ if (query.windows_available == 0) { - struct property *default_win; int reset_win_ext; /* DDW + IOMMU on single window may fail if there is any allocation */ - if (iommu_table_in_use(tbl)) { + if (iommu_table_in_use(pci->table_group->tables[0])) { dev_warn(&dev->dev, "current IOMMU table in use, can't be replaced.\n"); goto out_failed; } @@ -1306,7 +1548,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (reset_win_ext) goto out_failed; - remove_dma_window(pdn, ddw_avail, default_win); + remove_dma_window(pdn, ddw_avail, default_win, true); default_win_removed = true; /* Query again, to check if the window is available */ @@ -1328,6 +1570,14 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) goto out_failed; } + /* Maximum DMA window size that the device can address (in log2) */ + dev_max_ddw = fls64(dma_mask); + + /* If the device DMA mask is less than 64-bits, make sure the DMA window + * size is not bigger than what the device can access + */ + ddw_sz = min(order_base_2(query.largest_available_block << page_shift), + dev_max_ddw); /* * The "ibm,pmemory" can appear anywhere in the address space. @@ -1337,30 +1587,56 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) */ len = max_ram_len; if (pmem_present) { - if (query.largest_available_block >= - (1ULL << (MAX_PHYSMEM_BITS - page_shift))) + if (ddw_sz >= MAX_PHYSMEM_BITS) len = MAX_PHYSMEM_BITS; else dev_info(&dev->dev, "Skipping ibm,pmemory"); } /* check if the available block * number of ptes will map everything */ - if (query.largest_available_block < (1ULL << (len - page_shift))) { + if (ddw_sz < len) { dev_dbg(&dev->dev, "can't map partition max 0x%llx with %llu %llu-sized pages\n", 1ULL << len, query.largest_available_block, 1ULL << page_shift); - len = order_base_2(query.largest_available_block << page_shift); - win_name = DMA64_PROPNAME; + len = ddw_sz; + dynamic_mapping = true; } else { direct_mapping = !default_win_removed || (len == MAX_PHYSMEM_BITS) || (!pmem_present && (len == max_ram_len)); - win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME; + + /* DDW is big enough to direct map RAM. If there is vPMEM, check + * if enough space is left in DDW where we can dynamically + * allocate TCEs for vPMEM. For now, this Hybrid sharing of DDW + * is only for SR-IOV devices. + */ + if (default_win_removed && pmem_present && !direct_mapping) { + /* DDW is big enough to be split */ + if ((1ULL << ddw_sz) >= + MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) { + + direct_mapping = true; + + /* offset of the Dynamic part of DDW */ + dynamic_offset = 1ULL << max_ram_len; + } + + /* DDW will at least have dynamic allocation */ + dynamic_mapping = true; + + /* create max size DDW possible */ + len = ddw_sz; + } } + /* Even if the DDW is split into both direct mapped RAM and dynamically + * mapped vPMEM, the DDW property in OF will be marked as Direct. + */ + win_name = direct_mapping ? 
DIRECT64_PROPNAME : DMA64_PROPNAME; + ret = create_ddw(dev, ddw_avail, &create, page_shift, len); if (ret != 0) goto out_failed; @@ -1388,9 +1664,11 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (!window) goto out_del_prop; + window->direct = direct_mapping; + if (direct_mapping) { /* DDW maps the whole partition, so enable direct DMA mapping */ - ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT, + ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT, win64->value, tce_setrange_multi_pSeriesLP_walk); if (ret) { dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n", @@ -1400,10 +1678,18 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) clean_dma_window(pdn, win64->value); goto out_del_list; } - } else { + if (default_win_removed) { + iommu_tce_table_put(pci->table_group->tables[0]); + pci->table_group->tables[0] = NULL; + set_iommu_table_base(&dev->dev, NULL); + } + } + + if (dynamic_mapping) { struct iommu_table *newtbl; int i; unsigned long start = 0, end = 0; + u64 dynamic_addr, dynamic_len; for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) { const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM; @@ -1423,22 +1709,31 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) goto out_del_list; } - iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, win_addr, - 1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops); - iommu_init_table(newtbl, pci->phb->node, start, end); - - pci->table_group->tables[1] = newtbl; + /* If the DDW is split between directly mapped RAM and Dynamic + * mapped for TCES, offset into the DDW where the dynamic part + * begins. + */ + dynamic_addr = win_addr + dynamic_offset; + dynamic_len = (1UL << len) - dynamic_offset; + iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, + dynamic_addr, dynamic_len, page_shift, NULL, + &iommu_table_lpar_multi_ops); + iommu_init_table(newtbl, pci->phb->node, + start >> page_shift, end >> page_shift); - /* Keep default DMA window struct if removed */ - if (default_win_removed) { - tbl->it_size = 0; - vfree(tbl->it_map); - tbl->it_map = NULL; - } + pci->table_group->tables[default_win_removed ? 0 : 1] = newtbl; set_iommu_table_base(&dev->dev, newtbl); } + if (default_win_removed) { + /* default_win is valid here because default_win_removed == true */ + if (!of_find_property(pdn, "ibm,dma-window-saved", NULL)) + copy_property(pdn, "ibm,dma-window", "ibm,dma-window-saved"); + of_remove_property(pdn, default_win); + dev_info(&dev->dev, "Removed default DMA window for %pOF\n", pdn); + } + spin_lock(&dma_win_list_lock); list_add(&window->list, &dma_win_list); spin_unlock(&dma_win_list_lock); @@ -1462,7 +1757,7 @@ out_remove_win: __remove_dma_window(pdn, ddw_avail, create.liobn); out_failed: - if (default_win_removed) + if (default_win_removed || limited_addr_enabled) reset_dma_window(dev, pdn); fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); @@ -1474,23 +1769,90 @@ out_failed: out_unlock: mutex_unlock(&dma_win_init_mutex); - /* - * If we have persistent memory and the window size is only as big - * as RAM, then we failed to create a window to cover persistent - * memory and need to set the DMA limit. + /* If we have persistent memory and the window size is not big enough + * to directly map both RAM and vPMEM, then we need to set DMA limit. 
*/ - if (pmem_present && direct_mapping && len == max_ram_len) - dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len); + if (pmem_present && direct_mapping && len != MAX_PHYSMEM_BITS) + dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + + (1ULL << max_ram_len); + + dev_info(&dev->dev, "lsa_required: %x, lsa_enabled: %x, direct mapping: %x\n", + limited_addr_req, limited_addr_enabled, direct_mapping); return direct_mapping; } +static __u64 query_page_size_to_mask(u32 query_page_size) +{ + const long shift[] = { + (SZ_4K), (SZ_64K), (SZ_16M), + (SZ_32M), (SZ_64M), (SZ_128M), + (SZ_256M), (SZ_16G), (SZ_2M) + }; + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(shift); i++) { + if (query_page_size & (1 << i)) + ret |= shift[i]; + } + + return ret; +} + +static void spapr_tce_init_table_group(struct pci_dev *pdev, + struct device_node *pdn, + struct dynamic_dma_window_prop prop) +{ + struct iommu_table_group *table_group = PCI_DN(pdn)->table_group; + u32 ddw_avail[DDW_APPLICABLE_SIZE]; + + struct ddw_query_response query; + int ret; + + /* Only for normal boot with default window. Doesn't matter during + * kdump, since these will not be used during kdump. + */ + if (is_kdump_kernel()) + return; + + if (table_group->max_dynamic_windows_supported != 0) + return; /* already initialized */ + + table_group->tce32_start = be64_to_cpu(prop.dma_base); + table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift); + + if (!of_find_property(pdn, "ibm,dma-window", NULL)) + dev_err(&pdev->dev, "default dma window missing!\n"); + + ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable", + &ddw_avail[0], DDW_APPLICABLE_SIZE); + if (ret) { + table_group->max_dynamic_windows_supported = -1; + return; + } + + ret = query_ddw(pdev, ddw_avail, &query, pdn); + if (ret) { + dev_err(&pdev->dev, "%s: query_ddw failed\n", __func__); + table_group->max_dynamic_windows_supported = -1; + return; + } + + if (query.windows_available == 0) + table_group->max_dynamic_windows_supported = 1; + else + table_group->max_dynamic_windows_supported = IOMMU_TABLE_GROUP_MAX_TABLES; + + table_group->max_levels = 1; + table_group->pgsizes |= query_page_size_to_mask(query.page_size); +} + static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) { struct device_node *pdn, *dn; struct iommu_table *tbl; - const __be32 *dma_window = NULL; struct pci_dn *pci; + struct dynamic_dma_window_prop prop; pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); @@ -1503,13 +1865,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) dn = pci_device_to_OF_node(dev); pr_debug(" node is %pOF\n", dn); - for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group; - pdn = pdn->parent) { - dma_window = of_get_property(pdn, "ibm,dma-window", NULL); - if (dma_window) - break; - } - + pdn = pci_dma_find(dn, &prop); if (!pdn || !PCI_DN(pdn)) { printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: " "no DMA window found for pci dev=%s dn=%pOF\n", @@ -1522,8 +1878,13 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) if (!pci->table_group) { pci->table_group = iommu_pseries_alloc_group(pci->phb->node); tbl = pci->table_group->tables[0]; - iommu_table_setparms_lpar(pci->phb, pdn, tbl, - pci->table_group, dma_window); + + iommu_table_setparms_common(tbl, pci->phb->bus->number, + be32_to_cpu(prop.liobn), + be64_to_cpu(prop.dma_base), + 1ULL << be32_to_cpu(prop.window_shift), + be32_to_cpu(prop.tce_shift), NULL, + &iommu_table_lpar_multi_ops); iommu_init_table(tbl, pci->phb->node, 0, 0); 
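query_page_size_to_mask() in the hunk above turns the page-size bit field returned by ibm,query-pe-dma-window into the table_group->pgsizes bitmap. A small user-space model of the conversion; the size constants are copied from the table in the patch (mirroring <linux/sizes.h>) and the sample query value is invented:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K   0x0000000000001000ULL
#define SZ_64K  0x0000000000010000ULL
#define SZ_2M   0x0000000000200000ULL
#define SZ_16M  0x0000000001000000ULL
#define SZ_32M  0x0000000002000000ULL
#define SZ_64M  0x0000000004000000ULL
#define SZ_128M 0x0000000008000000ULL
#define SZ_256M 0x0000000010000000ULL
#define SZ_16G  0x0000000400000000ULL

static uint64_t page_size_to_mask(uint32_t query_page_size)
{
        /* Same order as the shift[] table in the patch. */
        const uint64_t size[] = {
                SZ_4K, SZ_64K, SZ_16M, SZ_32M, SZ_64M,
                SZ_128M, SZ_256M, SZ_16G, SZ_2M,
        };
        uint64_t mask = 0;
        unsigned int i;

        /* Each set bit in the query result selects one supported page size;
         * the sizes are powers of two, so OR-ing them builds a pgsizes bitmap. */
        for (i = 0; i < sizeof(size) / sizeof(size[0]); i++)
                if (query_page_size & (1U << i))
                        mask |= size[i];
        return mask;
}

int main(void)
{
        /* Hypothetical firmware answer: bits 0 and 1 -> 4K and 64K pages. */
        printf("pgsizes = 0x%llx\n",
               (unsigned long long)page_size_to_mask(0x3));    /* 0x11000 */
        return 0;
}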
iommu_register_group(pci->table_group, @@ -1533,6 +1894,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) pr_debug(" found DMA window, table: %p\n", pci->table_group); } + spapr_tce_init_table_group(dev, pdn, prop); + set_iommu_table_base(&dev->dev, pci->table_group->tables[0]); iommu_add_device(pci->table_group, &dev->dev); } @@ -1540,10 +1903,12 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask) { struct device_node *dn = pci_device_to_OF_node(pdev), *pdn; - const __be32 *dma_window = NULL; - /* only attempt to use a new window if 64-bit DMA is requested */ - if (dma_mask < DMA_BIT_MASK(64)) + /* For DDW, DMA mask should be more than 32-bits. For mask more then + * 32-bits but less then 64-bits, DMA addressing is supported in + * Limited Addressing mode. + */ + if (dma_mask <= DMA_BIT_MASK(32)) return false; dev_dbg(&pdev->dev, "node is %pOF\n", dn); @@ -1554,19 +1919,503 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask) * search upwards in the tree until we either hit a dma-window * property, OR find a parent with a table already allocated. */ - for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group; - pdn = pdn->parent) { - dma_window = of_get_property(pdn, "ibm,dma-window", NULL); - if (dma_window) - break; + pdn = pci_dma_find(dn, NULL); + if (pdn && PCI_DN(pdn)) + return enable_ddw(pdev, pdn, dma_mask); + + return false; +} + +#ifdef CONFIG_IOMMU_API +/* + * A simple iommu_table_group_ops which only allows reusing the existing + * iommu_table. This handles VFIO for POWER7 or the nested KVM. + * The ops does not allow creating windows and only allows reusing the existing + * one if it matches table_group->tce32_start/tce32_size/page_shift. + */ +static unsigned long spapr_tce_get_table_size(__u32 page_shift, + __u64 window_size, __u32 levels) +{ + unsigned long size; + + if (levels > 1) + return ~0U; + size = window_size >> (page_shift - 3); + return size; +} + +static struct pci_dev *iommu_group_get_first_pci_dev(struct iommu_group *group) +{ + struct pci_dev *pdev = NULL; + int ret; + + /* No IOMMU group ? */ + if (!group) + return NULL; + + ret = iommu_group_for_each_dev(group, &pdev, dev_has_iommu_table); + if (!ret || !pdev) + return NULL; + return pdev; +} + +static void restore_default_dma_window(struct pci_dev *pdev, struct device_node *pdn) +{ + reset_dma_window(pdev, pdn); + copy_property(pdn, "ibm,dma-window-saved", "ibm,dma-window"); +} + +static long remove_dynamic_dma_windows(struct pci_dev *pdev, struct device_node *pdn) +{ + struct pci_dn *pci = PCI_DN(pdn); + struct dma_win *window; + bool direct_mapping; + int len; + + if (find_existing_ddw(pdn, &pdev->dev.archdata.dma_offset, &len, &direct_mapping)) { + remove_dma_window_named(pdn, true, direct_mapping ? 
+ DIRECT64_PROPNAME : DMA64_PROPNAME, true); + if (!direct_mapping) { + WARN_ON(!pci->table_group->tables[0] && !pci->table_group->tables[1]); + + if (pci->table_group->tables[1]) { + iommu_tce_table_put(pci->table_group->tables[1]); + pci->table_group->tables[1] = NULL; + } else if (pci->table_group->tables[0]) { + /* Default window was removed and only the DDW exists */ + iommu_tce_table_put(pci->table_group->tables[0]); + pci->table_group->tables[0] = NULL; + } + } + spin_lock(&dma_win_list_lock); + list_for_each_entry(window, &dma_win_list, list) { + if (window->device == pdn) { + list_del(&window->list); + kfree(window); + break; + } + } + spin_unlock(&dma_win_list_lock); } - if (pdn && PCI_DN(pdn)) - return enable_ddw(pdev, pdn); + return 0; +} + +static long pseries_setup_default_iommu_config(struct iommu_table_group *table_group, + struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + const __be32 *default_prop; + long liobn, offset, size; + struct device_node *pdn; + struct iommu_table *tbl; + struct pci_dn *pci; + + pdn = pci_dma_find_parent_node(pdev, table_group); + if (!pdn || !PCI_DN(pdn)) { + dev_warn(&pdev->dev, "No table_group configured for the node %pOF\n", pdn); + return -1; + } + pci = PCI_DN(pdn); + + /* The default window is restored if not present already on removal of DDW. + * However, if used by VFIO SPAPR sub driver, the user's order of removal of + * windows might have been different to not leading to auto restoration, + * suppose the DDW was removed first followed by the default one. + * So, restore the default window with reset-pe-dma call explicitly. + */ + restore_default_dma_window(pdev, pdn); + + default_prop = of_get_property(pdn, "ibm,dma-window", NULL); + of_parse_dma_window(pdn, default_prop, &liobn, &offset, &size); + tbl = iommu_pseries_alloc_table(pci->phb->node); + if (!tbl) { + dev_err(&pdev->dev, "couldn't create new IOMMU table\n"); + return -1; + } + + iommu_table_setparms_common(tbl, pci->phb->bus->number, liobn, offset, + size, IOMMU_PAGE_SHIFT_4K, NULL, + &iommu_table_lpar_multi_ops); + iommu_init_table(tbl, pci->phb->node, 0, 0); + + pci->table_group->tables[0] = tbl; + set_iommu_table_base(&pdev->dev, tbl); + + return 0; +} + +static bool is_default_window_request(struct iommu_table_group *table_group, __u32 page_shift, + __u64 window_size) +{ + if ((window_size <= table_group->tce32_size) && + (page_shift == IOMMU_PAGE_SHIFT_4K)) + return true; + + return false; +} + +static long spapr_tce_create_table(struct iommu_table_group *table_group, int num, + __u32 page_shift, __u64 window_size, __u32 levels, + struct iommu_table **ptbl) +{ + struct pci_dev *pdev = iommu_group_get_first_pci_dev(table_group->group); + u32 ddw_avail[DDW_APPLICABLE_SIZE]; + struct ddw_create_response create; + unsigned long liobn, offset, size; + unsigned long start = 0, end = 0; + struct ddw_query_response query; + const __be32 *default_prop; + struct failed_ddw_pdn *fpdn; + unsigned int window_shift; + struct device_node *pdn; + struct iommu_table *tbl; + struct dma_win *window; + struct property *win64; + struct pci_dn *pci; + u64 win_addr; + int len, i; + long ret; + + if (!is_power_of_2(window_size) || levels > 1) + return -EINVAL; + + window_shift = order_base_2(window_size); + + mutex_lock(&dma_win_init_mutex); + + ret = -ENODEV; + + pdn = pci_dma_find_parent_node(pdev, table_group); + if (!pdn || !PCI_DN(pdn)) { /* Niether of 32s|64-bit exist! 
*/ + dev_warn(&pdev->dev, "No dma-windows exist for the node %pOF\n", pdn); + goto out_failed; + } + pci = PCI_DN(pdn); + + /* If the enable DDW failed for the pdn, dont retry! */ + list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) { + if (fpdn->pdn == pdn) { + dev_info(&pdev->dev, "%pOF in failed DDW device list\n", pdn); + goto out_unlock; + } + } + + tbl = iommu_pseries_alloc_table(pci->phb->node); + if (!tbl) { + dev_dbg(&pdev->dev, "couldn't create new IOMMU table\n"); + goto out_unlock; + } + + if (num == 0) { + bool direct_mapping; + /* The request is not for default window? Ensure there is no DDW window already */ + if (!is_default_window_request(table_group, page_shift, window_size)) { + if (find_existing_ddw(pdn, &pdev->dev.archdata.dma_offset, &len, + &direct_mapping)) { + dev_warn(&pdev->dev, "%pOF: 64-bit window already present.", pdn); + ret = -EPERM; + goto out_unlock; + } + } else { + /* Request is for Default window, ensure there is no DDW if there is a + * need to reset. reset-pe otherwise removes the DDW also + */ + default_prop = of_get_property(pdn, "ibm,dma-window", NULL); + if (!default_prop) { + if (find_existing_ddw(pdn, &pdev->dev.archdata.dma_offset, &len, + &direct_mapping)) { + dev_warn(&pdev->dev, "%pOF: Attempt to create window#0 when 64-bit window is present. Preventing the attempt as that would destroy the 64-bit window", + pdn); + ret = -EPERM; + goto out_unlock; + } + + restore_default_dma_window(pdev, pdn); + + default_prop = of_get_property(pdn, "ibm,dma-window", NULL); + of_parse_dma_window(pdn, default_prop, &liobn, &offset, &size); + /* Limit the default window size to window_size */ + iommu_table_setparms_common(tbl, pci->phb->bus->number, liobn, + offset, 1UL << window_shift, + IOMMU_PAGE_SHIFT_4K, NULL, + &iommu_table_lpar_multi_ops); + iommu_init_table(tbl, pci->phb->node, + start >> IOMMU_PAGE_SHIFT_4K, + end >> IOMMU_PAGE_SHIFT_4K); + + table_group->tables[0] = tbl; + + mutex_unlock(&dma_win_init_mutex); + + goto exit; + } + } + } + + ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable", + &ddw_avail[0], DDW_APPLICABLE_SIZE); + if (ret) { + dev_info(&pdev->dev, "ibm,ddw-applicable not found\n"); + goto out_failed; + } + ret = -ENODEV; + + pr_err("%s: Calling query %pOF\n", __func__, pdn); + ret = query_ddw(pdev, ddw_avail, &query, pdn); + if (ret) + goto out_failed; + ret = -ENODEV; + + len = window_shift; + if (query.largest_available_block < (1ULL << (len - page_shift))) { + dev_dbg(&pdev->dev, "can't map window 0x%llx with %llu %llu-sized pages\n", + 1ULL << len, query.largest_available_block, + 1ULL << page_shift); + ret = -EINVAL; /* Retry with smaller window size */ + goto out_unlock; + } + + if (create_ddw(pdev, ddw_avail, &create, page_shift, len)) { + pr_err("%s: Create ddw failed %pOF\n", __func__, pdn); + goto out_failed; + } + + win_addr = ((u64)create.addr_hi << 32) | create.addr_lo; + win64 = ddw_property_create(DMA64_PROPNAME, create.liobn, win_addr, page_shift, len); + if (!win64) + goto remove_window; + + ret = of_add_property(pdn, win64); + if (ret) { + dev_err(&pdev->dev, "unable to add DMA window property for %pOF: %ld", pdn, ret); + goto free_property; + } + ret = -ENODEV; + + window = ddw_list_new_entry(pdn, win64->value); + if (!window) + goto remove_property; + + window->direct = false; + + for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) { + const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM; + + /* Look for MMIO32 */ + if ((pci->phb->mem_resources[i].flags & mask) == IORESOURCE_MEM) { + 
start = pci->phb->mem_resources[i].start; + end = pci->phb->mem_resources[i].end; + break; + } + } + + /* New table for using DDW instead of the default DMA window */ + iommu_table_setparms_common(tbl, pci->phb->bus->number, create.liobn, win_addr, + 1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops); + iommu_init_table(tbl, pci->phb->node, start >> page_shift, end >> page_shift); + + pci->table_group->tables[num] = tbl; + set_iommu_table_base(&pdev->dev, tbl); + pdev->dev.archdata.dma_offset = win_addr; + + spin_lock(&dma_win_list_lock); + list_add(&window->list, &dma_win_list); + spin_unlock(&dma_win_list_lock); + + mutex_unlock(&dma_win_init_mutex); + + goto exit; + +remove_property: + of_remove_property(pdn, win64); +free_property: + kfree(win64->name); + kfree(win64->value); + kfree(win64); +remove_window: + __remove_dma_window(pdn, ddw_avail, create.liobn); + +out_failed: + fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); + if (!fpdn) + goto out_unlock; + fpdn->pdn = pdn; + list_add(&fpdn->list, &failed_ddw_pdn_list); + +out_unlock: + mutex_unlock(&dma_win_init_mutex); + + return ret; +exit: + /* Allocate the userspace view */ + pseries_tce_iommu_userspace_view_alloc(tbl); + tbl->it_allocated_size = spapr_tce_get_table_size(page_shift, window_size, levels); + + *ptbl = iommu_tce_table_get(tbl); + + return 0; +} + +static bool is_default_window_table(struct iommu_table_group *table_group, struct iommu_table *tbl) +{ + if (((tbl->it_size << tbl->it_page_shift) <= table_group->tce32_size) && + (tbl->it_page_shift == IOMMU_PAGE_SHIFT_4K)) + return true; return false; } +static long spapr_tce_set_window(struct iommu_table_group *table_group, + int num, struct iommu_table *tbl) +{ + return tbl == table_group->tables[num] ? 0 : -EPERM; +} + +static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num) +{ + struct pci_dev *pdev = iommu_group_get_first_pci_dev(table_group->group); + struct device_node *dn = pci_device_to_OF_node(pdev), *pdn; + struct iommu_table *tbl = table_group->tables[num]; + struct failed_ddw_pdn *fpdn; + struct dma_win *window; + const char *win_name; + int ret = -ENODEV; + + if (!tbl) /* The table was never created OR window was never opened */ + return 0; + + mutex_lock(&dma_win_init_mutex); + + if ((num == 0) && is_default_window_table(table_group, tbl)) + win_name = "ibm,dma-window"; + else + win_name = DMA64_PROPNAME; + + pdn = pci_dma_find(dn, NULL); + if (!pdn || !PCI_DN(pdn)) { /* Niether of 32s|64-bit exist! */ + dev_warn(&pdev->dev, "No dma-windows exist for the node %pOF\n", pdn); + goto out_failed; + } + + /* Dont clear the TCEs, User should have done it */ + if (remove_dma_window_named(pdn, true, win_name, false)) { + pr_err("%s: The existing DDW removal failed for node %pOF\n", __func__, pdn); + goto out_failed; /* Could not remove it either! 
*/ + } + + if (strcmp(win_name, DMA64_PROPNAME) == 0) { + spin_lock(&dma_win_list_lock); + list_for_each_entry(window, &dma_win_list, list) { + if (window->device == pdn) { + list_del(&window->list); + kfree(window); + break; + } + } + spin_unlock(&dma_win_list_lock); + } + + iommu_tce_table_put(table_group->tables[num]); + table_group->tables[num] = NULL; + + ret = 0; + + goto out_unlock; + +out_failed: + fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); + if (!fpdn) + goto out_unlock; + fpdn->pdn = pdn; + list_add(&fpdn->list, &failed_ddw_pdn_list); + +out_unlock: + mutex_unlock(&dma_win_init_mutex); + + return ret; +} + +static long spapr_tce_take_ownership(struct iommu_table_group *table_group, struct device *dev) +{ + struct iommu_table *tbl = table_group->tables[0]; + struct pci_dev *pdev = to_pci_dev(dev); + struct device_node *dn = pci_device_to_OF_node(pdev); + struct device_node *pdn; + + /* SRIOV VFs using direct map by the host driver OR multifunction devices + * where the ownership was taken on the attempt by the first function + */ + if (!tbl && (table_group->max_dynamic_windows_supported != 1)) + return 0; + + mutex_lock(&dma_win_init_mutex); + + pdn = pci_dma_find(dn, NULL); + if (!pdn || !PCI_DN(pdn)) { /* Niether of 32s|64-bit exist! */ + dev_warn(&pdev->dev, "No dma-windows exist for the node %pOF\n", pdn); + mutex_unlock(&dma_win_init_mutex); + return -1; + } + + /* + * Though rtas call reset-pe removes the DDW, it doesn't clear the entries on the table + * if there are any. In case of direct map, the entries will be left over, which + * is fine for PEs with 2 DMA windows where the second window is created with create-pe + * at which point the table is cleared. However, on VFs having only one DMA window, the + * default window would end up seeing the entries left over from the direct map done + * on the second window. So, remove the ddw explicitly so that clean_dma_window() + * cleans up the entries if any. + */ + if (remove_dynamic_dma_windows(pdev, pdn)) { + dev_warn(&pdev->dev, "The existing DDW removal failed for node %pOF\n", pdn); + mutex_unlock(&dma_win_init_mutex); + return -1; + } + + /* The table_group->tables[0] is not null now, it must be the default window + * Remove it, let the userspace create it as it needs. 
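spapr_tce_get_table_size(), earlier in this CONFIG_IOMMU_API block, sizes the single-level table as window_size >> (page_shift - 3), i.e. one 8-byte TCE per IOMMU page. A tiny sketch of the same calculation with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t window_size = 1ULL << 31;      /* 2 GiB DMA window (example) */
        uint32_t page_shift = 16;               /* 64K IOMMU pages */

        uint64_t ntces = window_size >> page_shift;             /* 32768 entries */
        uint64_t bytes = window_size >> (page_shift - 3);       /* 8 bytes per TCE -> 256 KiB */

        printf("%llu TCEs, %llu bytes of table\n",
               (unsigned long long)ntces, (unsigned long long)bytes);
        return 0;
}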
+ */ + if (table_group->tables[0]) { + remove_dma_window_named(pdn, true, "ibm,dma-window", true); + iommu_tce_table_put(tbl); + table_group->tables[0] = NULL; + } + set_iommu_table_base(dev, NULL); + + mutex_unlock(&dma_win_init_mutex); + + return 0; +} + +static void spapr_tce_release_ownership(struct iommu_table_group *table_group, struct device *dev) +{ + struct iommu_table *tbl = table_group->tables[0]; + + if (tbl) { /* Default window already restored */ + return; + } + + mutex_lock(&dma_win_init_mutex); + + /* Restore the default window */ + pseries_setup_default_iommu_config(table_group, dev); + + mutex_unlock(&dma_win_init_mutex); + + return; +} + +static struct iommu_table_group_ops spapr_tce_table_group_ops = { + .get_table_size = spapr_tce_get_table_size, + .create_table = spapr_tce_create_table, + .set_window = spapr_tce_set_window, + .unset_window = spapr_tce_unset_window, + .take_ownership = spapr_tce_take_ownership, + .release_ownership = spapr_tce_release_ownership, +}; +#endif + static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -1574,12 +2423,20 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, struct memory_notify *arg = data; int ret = 0; + /* This notifier can get called when onlining persistent memory as well. + * TCEs are not pre-mapped for persistent memory. Persistent memory will + * always be above ddw_memory_hotplug_max() + */ + switch (action) { case MEM_GOING_ONLINE: spin_lock(&dma_win_list_lock); list_for_each_entry(window, &dma_win_list, list) { - ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn, - arg->nr_pages, window->prop); + if (window->direct && (arg->start_pfn << PAGE_SHIFT) < + ddw_memory_hotplug_max()) { + ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn, + arg->nr_pages, window->prop); + } /* XXX log error */ } spin_unlock(&dma_win_list_lock); @@ -1588,8 +2445,11 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, case MEM_OFFLINE: spin_lock(&dma_win_list_lock); list_for_each_entry(window, &dma_win_list, list) { - ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, - arg->nr_pages, window->prop); + if (window->direct && (arg->start_pfn << PAGE_SHIFT) < + ddw_memory_hotplug_max()) { + ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, + arg->nr_pages, window->prop); + } /* XXX log error */ } spin_unlock(&dma_win_list_lock); @@ -1624,8 +2484,8 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti * we have to remove the property when releasing * the device node. 
*/ - if (remove_ddw(np, false, DIRECT64_PROPNAME)) - remove_ddw(np, false, DMA64_PROPNAME); + if (remove_dma_window_named(np, false, DIRECT64_PROPNAME, true)) + remove_dma_window_named(np, false, DMA64_PROPNAME, true); if (pci && pci->table_group) iommu_pseries_free_group(pci->table_group, @@ -1691,27 +2551,26 @@ static int __init disable_multitce(char *str) __setup("multitce=", disable_multitce); -static int tce_iommu_bus_notifier(struct notifier_block *nb, - unsigned long action, void *data) +#ifdef CONFIG_SPAPR_TCE_IOMMU +struct iommu_group *pSeries_pci_device_group(struct pci_controller *hose, + struct pci_dev *pdev) { - struct device *dev = data; + struct device_node *pdn, *dn = pdev->dev.of_node; + struct iommu_group *grp; + struct pci_dn *pci; - switch (action) { - case BUS_NOTIFY_DEL_DEVICE: - iommu_del_device(dev); - return 0; - default: - return 0; - } -} + pdn = pci_dma_find(dn, NULL); + if (!pdn || !PCI_DN(pdn)) + return ERR_PTR(-ENODEV); -static struct notifier_block tce_iommu_bus_nb = { - .notifier_call = tce_iommu_bus_notifier, -}; + pci = PCI_DN(pdn); + if (!pci->table_group) + return ERR_PTR(-ENODEV); -static int __init tce_iommu_bus_notifier_init(void) -{ - bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); - return 0; + grp = pci->table_group->group; + if (!grp) + return ERR_PTR(-ENODEV); + + return iommu_group_ref_get(grp); } -machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init); +#endif diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index ab6cdbebb35e..431be156ca9b 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c @@ -6,7 +6,7 @@ #include <linux/kernel.h> #include <linux/interrupt.h> -#include <asm/machdep.h> +#include <asm/setup.h> #include <asm/page.h> #include <asm/firmware.h> #include <asm/kexec.h> @@ -61,11 +61,3 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary) } else xics_kexec_teardown_cpu(secondary); } - -void pseries_machine_kexec(struct kimage *image) -{ - if (firmware_has_feature(FW_FEATURE_SET_MODE)) - pseries_disable_reloc_on_exc(); - - default_machine_kexec(image); -} diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 937f9c010b22..6a415febc53b 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -16,6 +16,7 @@ #include <linux/export.h> #include <linux/jump_label.h> #include <linux/delay.h> +#include <linux/seq_file.h> #include <linux/stop_machine.h> #include <linux/spinlock.h> #include <linux/cpuhotplug.h> @@ -27,11 +28,12 @@ #include <asm/processor.h> #include <asm/mmu.h> #include <asm/page.h> -#include <asm/machdep.h> +#include <asm/setup.h> #include <asm/mmu_context.h> #include <asm/iommu.h> #include <asm/tlb.h> #include <asm/cputable.h> +#include <asm/papr-sysparm.h> #include <asm/udbg.h> #include <asm/smp.h> #include <asm/trace.h> @@ -40,6 +42,7 @@ #include <asm/kexec.h> #include <asm/fadump.h> #include <asm/dtl.h> +#include <asm/vphn.h> #include "pseries.h" @@ -167,7 +170,7 @@ struct vcpu_dispatch_data { */ #define NR_CPUS_H NR_CPUS -DEFINE_RWLOCK(dtl_access_lock); +DECLARE_RWSEM(dtl_access_lock); static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data); static DEFINE_PER_CPU(u64, dtl_entry_ridx); static DEFINE_PER_CPU(struct dtl_worker, dtl_workers); @@ -190,9 +193,9 @@ static void free_dtl_buffers(unsigned long *time_limit) continue; kmem_cache_free(dtl_cache, pp->dispatch_log); pp->dtl_ridx = 0; - pp->dispatch_log = 0; - 
pp->dispatch_log_end = 0; - pp->dtl_curr = 0; + pp->dispatch_log = NULL; + pp->dispatch_log_end = NULL; + pp->dtl_curr = NULL; if (time_limit && time_after(jiffies, *time_limit)) { cond_resched(); @@ -221,7 +224,7 @@ static void destroy_cpu_associativity(void) { kfree(vcpu_associativity); kfree(pcpu_associativity); - vcpu_associativity = pcpu_associativity = 0; + vcpu_associativity = pcpu_associativity = NULL; } static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag) @@ -461,7 +464,7 @@ static int dtl_worker_enable(unsigned long *time_limit) { int rc = 0, state; - if (!write_trylock(&dtl_access_lock)) { + if (!down_write_trylock(&dtl_access_lock)) { rc = -EBUSY; goto out; } @@ -477,7 +480,7 @@ static int dtl_worker_enable(unsigned long *time_limit) pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n"); free_dtl_buffers(time_limit); reset_global_dtl_mask(); - write_unlock(&dtl_access_lock); + up_write(&dtl_access_lock); rc = -EINVAL; goto out; } @@ -492,7 +495,7 @@ static void dtl_worker_disable(unsigned long *time_limit) cpuhp_remove_state(dtl_worker_state); free_dtl_buffers(time_limit); reset_global_dtl_mask(); - write_unlock(&dtl_access_lock); + up_write(&dtl_access_lock); } static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p, @@ -524,8 +527,10 @@ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p, if (cmd) { rc = init_cpu_associativity(); - if (rc) + if (rc) { + destroy_cpu_associativity(); goto out; + } for_each_possible_cpu(cpu) { disp = per_cpu_ptr(&vcpu_disp_data, cpu); @@ -638,16 +643,8 @@ static const struct proc_ops vcpudispatch_stats_freq_proc_ops = { static int __init vcpudispatch_stats_procfs_init(void) { - /* - * Avoid smp_processor_id while preemptible. All CPUs should have - * the same value for lppaca_shared_proc. - */ - preempt_disable(); - if (!lppaca_shared_proc(get_lppaca())) { - preempt_enable(); + if (!lppaca_shared_proc()) return 0; - } - preempt_enable(); if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL, &vcpudispatch_stats_proc_ops)) @@ -660,6 +657,21 @@ static int __init vcpudispatch_stats_procfs_init(void) } machine_device_initcall(pseries, vcpudispatch_stats_procfs_init); + +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING +u64 pseries_paravirt_steal_clock(int cpu) +{ + struct lppaca *lppaca = &lppaca_of(cpu); + + /* + * VPA steal time counters are reported at TB frequency. Hence do a + * conversion to ns before returning + */ + return tb_to_ns(be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) + + be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb))); +} +#endif + #endif /* CONFIG_PPC_SPLPAR */ void vpa_init(int cpu) @@ -1458,8 +1470,6 @@ static inline void __init check_lp_set_hblkrm(unsigned int lp, } } -#define SPLPAR_TLB_BIC_TOKEN 50 - /* * The size of the TLB Block Invalidate Characteristics is variable. But at the * maximum it will be the number of possible page sizes *2 + 10 bytes. 
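The rewritten loop in pseries_lpar_read_hblkrm_characteristics() below walks the returned parameter as a sequence of { block_shift, count, count page-size encodings } records. A stand-alone sketch of that walk; the payload bytes here are invented, whereas the real buffer comes from papr_sysparm_get():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Invented TLB Block Invalidate Characteristics payload: two records of
         * { block_shift, npsize, npsize page-size encodings }. */
        uint8_t val[] = { 5, 2, 0x00, 0x05,     /* block size 32 for two page-size encodings */
                          6, 1, 0x10 };         /* block size 64 for one page-size encoding  */
        int len = sizeof(val);
        int idx = 0;

        while (idx < len) {
                uint8_t block_shift = val[idx++];
                uint32_t block_size;
                unsigned int npsize;

                if (!block_shift)       /* a zero shift terminates the list */
                        break;

                block_size = 1u << block_shift;

                for (npsize = val[idx++]; npsize > 0 && idx < len; npsize--)
                        printf("page-size encoding %u: invalidate block size %u\n",
                               val[idx++], block_size);
        }
        return 0;
}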
@@ -1470,42 +1480,24 @@ static inline void __init check_lp_set_hblkrm(unsigned int lp, void __init pseries_lpar_read_hblkrm_characteristics(void) { - unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH]; - int call_status, len, idx, bpsize; + static struct papr_sysparm_buf buf __initdata; + int len, idx, bpsize; if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) return; - spin_lock(&rtas_data_buf_lock); - memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE); - call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, - NULL, - SPLPAR_TLB_BIC_TOKEN, - __pa(rtas_data_buf), - RTAS_DATA_BUF_SIZE); - memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH); - local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0'; - spin_unlock(&rtas_data_buf_lock); - - if (call_status != 0) { - pr_warn("%s %s Error calling get-system-parameter (0x%x)\n", - __FILE__, __func__, call_status); + if (papr_sysparm_get(PAPR_SYSPARM_TLB_BLOCK_INVALIDATE_ATTRS, &buf)) return; - } - /* - * The first two (2) bytes of the data in the buffer are the length of - * the returned data, not counting these first two (2) bytes. - */ - len = be16_to_cpu(*((u16 *)local_buffer)) + 2; + len = be16_to_cpu(buf.len); if (len > SPLPAR_TLB_BIC_MAXLENGTH) { pr_warn("%s too large returned buffer %d", __func__, len); return; } - idx = 2; + idx = 0; while (idx < len) { - u8 block_shift = local_buffer[idx++]; + u8 block_shift = buf.val[idx++]; u32 block_size; unsigned int npsize; @@ -1514,9 +1506,9 @@ void __init pseries_lpar_read_hblkrm_characteristics(void) block_size = 1 << block_shift; - for (npsize = local_buffer[idx++]; + for (npsize = buf.val[idx++]; npsize > 0 && idx < len; npsize--) - check_lp_set_hblkrm((unsigned int) local_buffer[idx++], + check_lp_set_hblkrm((unsigned int)buf.val[idx++], block_size); } @@ -1895,10 +1887,10 @@ out: * h_get_mpp * H_GET_MPP hcall returns info in 7 parms */ -int h_get_mpp(struct hvcall_mpp_data *mpp_data) +long h_get_mpp(struct hvcall_mpp_data *mpp_data) { - int rc; - unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; + long rc; rc = plpar_hcall9(H_GET_MPP, retbuf); diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index 507dc0b5987d..cc22924f159f 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -19,6 +19,7 @@ #include <linux/errno.h> #include <linux/proc_fs.h> #include <linux/init.h> +#include <asm/papr-sysparm.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/uaccess.h> @@ -28,13 +29,13 @@ #include <asm/firmware.h> #include <asm/rtas.h> #include <asm/time.h> -#include <asm/vdso_datapage.h> #include <asm/vio.h> #include <asm/mmu.h> #include <asm/machdep.h> #include <asm/drmem.h> #include "pseries.h" +#include "vas.h" /* pseries_vas_dlpar_cpu() */ /* * This isn't a module but we expose that to userspace @@ -111,8 +112,8 @@ struct hvcall_ppp_data { */ static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data) { - unsigned long rc; - unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; + long rc; rc = plpar_hcall9(H_GET_PPP, retbuf); @@ -168,20 +169,24 @@ out: kfree(buf); } -static unsigned h_pic(unsigned long *pool_idle_time, - unsigned long *num_procs) +static long h_pic(unsigned long *pool_idle_time, + unsigned long *num_procs) { - unsigned long rc; - unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = {0}; rc = plpar_hcall(H_PIC, 
retbuf); - *pool_idle_time = retbuf[0]; - *num_procs = retbuf[1]; + if (pool_idle_time) + *pool_idle_time = retbuf[0]; + if (num_procs) + *num_procs = retbuf[1]; return rc; } +unsigned long boot_pool_idle_time; + /* * parse_ppp_data * Parse out the data returned from h_get_ppp and h_pic @@ -191,7 +196,7 @@ static void parse_ppp_data(struct seq_file *m) struct hvcall_ppp_data ppp_data; struct device_node *root; const __be32 *perf_level; - int rc; + long rc; rc = h_get_ppp(&ppp_data); if (rc) @@ -204,7 +209,7 @@ static void parse_ppp_data(struct seq_file *m) ppp_data.active_system_procs); /* pool related entries are appropriate for shared configs */ - if (lppaca_shared_proc(get_lppaca())) { + if (lppaca_shared_proc()) { unsigned long pool_idle_time, pool_procs; seq_printf(m, "pool=%d\n", ppp_data.pool_num); @@ -213,9 +218,15 @@ static void parse_ppp_data(struct seq_file *m) seq_printf(m, "pool_capacity=%d\n", ppp_data.active_procs_in_pool * 100); - h_pic(&pool_idle_time, &pool_procs); - seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time); - seq_printf(m, "pool_num_procs=%ld\n", pool_procs); + /* In case h_pic call is not successful, this would result in + * APP values being wrong in tools like lparstat. + */ + + if (h_pic(&pool_idle_time, &pool_procs) == H_SUCCESS) { + seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time); + seq_printf(m, "pool_num_procs=%ld\n", pool_procs); + seq_printf(m, "boot_pool_idle_time=%ld\n", boot_pool_idle_time); + } } seq_printf(m, "unallocated_capacity_weight=%d\n", @@ -311,16 +322,6 @@ static void parse_mpp_x_data(struct seq_file *m) } /* - * PAPR defines, in section "7.3.16 System Parameters Option", the token 55 to - * read the LPAR name, and the largest output data to 4000 + 2 bytes length. - */ -#define SPLPAR_LPAR_NAME_TOKEN 55 -#define GET_SYS_PARM_BUF_SIZE 4002 -#if GET_SYS_PARM_BUF_SIZE > RTAS_DATA_BUF_SIZE -#error "GET_SYS_PARM_BUF_SIZE is larger than RTAS_DATA_BUF_SIZE" -#endif - -/* * Read the lpar name using the RTAS ibm,get-system-parameter call. 
* * The name read through this call is updated if changes are made by the end @@ -331,46 +332,19 @@ static void parse_mpp_x_data(struct seq_file *m) */ static int read_rtas_lpar_name(struct seq_file *m) { - int rc, len, token; - union { - char raw_buffer[GET_SYS_PARM_BUF_SIZE]; - struct { - __be16 len; - char name[GET_SYS_PARM_BUF_SIZE-2]; - }; - } *local_buffer; - - token = rtas_token("ibm,get-system-parameter"); - if (token == RTAS_UNKNOWN_SERVICE) - return -EINVAL; + struct papr_sysparm_buf *buf; + int err; - local_buffer = kmalloc(sizeof(*local_buffer), GFP_KERNEL); - if (!local_buffer) + buf = papr_sysparm_buf_alloc(); + if (!buf) return -ENOMEM; - do { - spin_lock(&rtas_data_buf_lock); - memset(rtas_data_buf, 0, sizeof(*local_buffer)); - rc = rtas_call(token, 3, 1, NULL, SPLPAR_LPAR_NAME_TOKEN, - __pa(rtas_data_buf), sizeof(*local_buffer)); - if (!rc) - memcpy(local_buffer->raw_buffer, rtas_data_buf, - sizeof(local_buffer->raw_buffer)); - spin_unlock(&rtas_data_buf_lock); - } while (rtas_busy_delay(rc)); - - if (!rc) { - /* Force end of string */ - len = min((int) be16_to_cpu(local_buffer->len), - (int) sizeof(local_buffer->name)-1); - local_buffer->name[len] = '\0'; - - seq_printf(m, "partition_name=%s\n", local_buffer->name); - } else - rc = -ENODATA; + err = papr_sysparm_get(PAPR_SYSPARM_LPAR_NAME, buf); + if (!err) + seq_printf(m, "partition_name=%s\n", buf->val); - kfree(local_buffer); - return rc; + papr_sysparm_buf_free(buf); + return err; } /* @@ -381,9 +355,13 @@ static int read_rtas_lpar_name(struct seq_file *m) */ static int read_dt_lpar_name(struct seq_file *m) { + struct device_node *root = of_find_node_by_path("/"); const char *name; + int ret; - if (of_property_read_string(of_root, "ibm,partition-name", &name)) + ret = of_property_read_string(root, "ibm,partition-name", &name); + of_node_put(root); + if (ret) return -ENOENT; seq_printf(m, "partition_name=%s\n", name); @@ -392,11 +370,10 @@ static int read_dt_lpar_name(struct seq_file *m) static void read_lpar_name(struct seq_file *m) { - if (read_rtas_lpar_name(m) && read_dt_lpar_name(m)) - pr_err_once("Error can't get the LPAR name"); + if (read_rtas_lpar_name(m)) + read_dt_lpar_name(m); } -#define SPLPAR_CHARACTERISTICS_TOKEN 20 #define SPLPAR_MAXLENGTH 1026*(sizeof(char)) /* @@ -407,45 +384,25 @@ static void read_lpar_name(struct seq_file *m) */ static void parse_system_parameter_string(struct seq_file *m) { - int call_status; + struct papr_sysparm_buf *buf; - unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); - if (!local_buffer) { - printk(KERN_ERR "%s %s kmalloc failure at line %d\n", - __FILE__, __func__, __LINE__); + buf = papr_sysparm_buf_alloc(); + if (!buf) return; - } - spin_lock(&rtas_data_buf_lock); - memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH); - call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, - NULL, - SPLPAR_CHARACTERISTICS_TOKEN, - __pa(rtas_data_buf), - RTAS_DATA_BUF_SIZE); - memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH); - local_buffer[SPLPAR_MAXLENGTH - 1] = '\0'; - spin_unlock(&rtas_data_buf_lock); - - if (call_status != 0) { - printk(KERN_INFO - "%s %s Error calling get-system-parameter (0x%x)\n", - __FILE__, __func__, call_status); + if (papr_sysparm_get(PAPR_SYSPARM_SHARED_PROC_LPAR_ATTRS, buf)) { + goto out_free; } else { + const char *local_buffer; int splpar_strlen; int idx, w_idx; char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); - if (!workbuffer) { - printk(KERN_ERR "%s %s kmalloc failure at line %d\n", - __FILE__, __func__, 
__LINE__); - kfree(local_buffer); - return; - } -#ifdef LPARCFG_DEBUG - printk(KERN_INFO "success calling get-system-parameter\n"); -#endif - splpar_strlen = local_buffer[0] * 256 + local_buffer[1]; - local_buffer += 2; /* step over strlen value */ + + if (!workbuffer) + goto out_free; + + splpar_strlen = be16_to_cpu(buf->len); + local_buffer = buf->val; w_idx = 0; idx = 0; @@ -479,7 +436,8 @@ static void parse_system_parameter_string(struct seq_file *m) kfree(workbuffer); local_buffer -= 2; /* back up over strlen value */ } - kfree(local_buffer); +out_free: + papr_sysparm_buf_free(buf); } /* Return the number of processors in the system. @@ -571,7 +529,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) lrdrp = of_get_property(rtas_node, "ibm,lrdr-capacity", NULL); if (lrdrp == NULL) { - partition_potential_processors = vdso_data->processorCount; + partition_potential_processors = num_possible_cpus(); } else { partition_potential_processors = be32_to_cpup(lrdrp + 4); } @@ -594,7 +552,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) } else { /* non SPLPAR case */ seq_printf(m, "system_active_processors=%d\n", - partition_potential_processors); + partition_active_processors); seq_printf(m, "system_potential_processors=%d\n", partition_potential_processors); @@ -615,7 +573,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) partition_potential_processors); seq_printf(m, "shared_processor_mode=%d\n", - lppaca_shared_proc(get_lppaca())); + lppaca_shared_proc()); #ifdef CONFIG_PPC_64S_HASH_MMU if (!radix_enabled()) @@ -748,6 +706,16 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf, return -EINVAL; retval = update_ppp(new_entitled_ptr, NULL); + + if (retval == H_SUCCESS || retval == H_CONSTRAINED) { + /* + * The hypervisor assigns VAS resources based + * on entitled capacity for shared mode. + * Reconfig VAS windows based on DLPAR CPU events. 
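parse_system_parameter_string(), in the hunks above, effectively emits the comma-separated shared-processor LPAR characteristics string one entry per line of the lparcfg output. A rough user-space model of that splitting, assuming the usual name=value,name=value form; the characteristic names below are made-up examples:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Invented SPLPAR characteristics string in comma-separated form. */
        char buf[] = "MaxEntCap=256,DesEntCap=100,MaxPlatProcs=32";
        char *save = NULL;
        char *tok;

        for (tok = strtok_r(buf, ",", &save); tok;
             tok = strtok_r(NULL, ",", &save))
                printf("%s\n", tok);    /* one characteristic per line */

        return 0;
}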
+ */ + if (pseries_vas_dlpar_cpu() != 0) + retval = H_HARDWARE; + } } else if (!strcmp(kbuf, "capacity_weight")) { char *endp; *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10); @@ -833,6 +801,7 @@ static const struct proc_ops lparcfg_proc_ops = { static int __init lparcfg_init(void) { umode_t mode = 0444; + long retval; /* Allow writing if we have FW_FEATURE_SPLPAR */ if (firmware_has_feature(FW_FEATURE_SPLPAR)) @@ -842,6 +811,16 @@ static int __init lparcfg_init(void) printk(KERN_ERR "Failed to create powerpc/lparcfg\n"); return -EIO; } + + /* If this call fails, it would result in APP values + * being wrong for since boot reports of lparstat + */ + retval = h_pic(&boot_pool_idle_time, NULL); + + if (retval != H_SUCCESS) + pr_debug("H_PIC failed during lparcfg init retval: %ld\n", + retval); + return 0; } machine_device_initcall(pseries, lparcfg_init); diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 78f3f74c7056..62bd8e2d5d4c 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -24,6 +24,7 @@ #include <linux/stringify.h> #include <asm/machdep.h> +#include <asm/nmi.h> #include <asm/rtas.h> #include "pseries.h" #include "vas.h" /* vas_migration_handler() */ @@ -48,6 +49,30 @@ struct update_props_workarea { #define MIGRATION_SCOPE (1) #define PRRN_SCOPE -2 +#ifdef CONFIG_PPC_WATCHDOG +static unsigned int nmi_wd_lpm_factor = 200; + +#ifdef CONFIG_SYSCTL +static const struct ctl_table nmi_wd_lpm_factor_ctl_table[] = { + { + .procname = "nmi_wd_lpm_factor", + .data = &nmi_wd_lpm_factor, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_douintvec_minmax, + }, +}; + +static int __init register_nmi_wd_lpm_factor_sysctl(void) +{ + register_sysctl("kernel", nmi_wd_lpm_factor_ctl_table); + + return 0; +} +device_initcall(register_nmi_wd_lpm_factor_sysctl); +#endif /* CONFIG_SYSCTL */ +#endif /* CONFIG_PPC_WATCHDOG */ + static int mobility_rtas_call(int token, char *buf, s32 scope) { int rc; @@ -162,7 +187,7 @@ static int update_dt_node(struct device_node *dn, s32 scope) u32 nprops; u32 vd; - update_properties_token = rtas_token("ibm,update-properties"); + update_properties_token = rtas_function_token(RTAS_FN_IBM_UPDATE_PROPERTIES); if (update_properties_token == RTAS_UNKNOWN_SERVICE) return -EINVAL; @@ -183,7 +208,7 @@ static int update_dt_node(struct device_node *dn, s32 scope) nprops = be32_to_cpu(upwa->nprops); /* On the first call to ibm,update-properties for a node the - * the first property value descriptor contains an empty + * first property value descriptor contains an empty * property name, the property value length encoded as u32, * and the property value is the node path being updated. */ @@ -273,7 +298,7 @@ static int pseries_devicetree_update(s32 scope) int update_nodes_token; int rc; - update_nodes_token = rtas_token("ibm,update-nodes"); + update_nodes_token = rtas_function_token(RTAS_FN_IBM_UPDATE_NODES); if (update_nodes_token == RTAS_UNKNOWN_SERVICE) return 0; @@ -427,6 +452,43 @@ static int wait_for_vasi_session_suspending(u64 handle) return ret; } +static void wait_for_vasi_session_completed(u64 handle) +{ + unsigned long state = 0; + int ret; + + pr_info("waiting for memory transfer to complete...\n"); + + /* + * Wait for transition from H_VASI_RESUMED to H_VASI_COMPLETED. 
+ */ + while (true) { + ret = poll_vasi_state(handle, &state); + + /* + * If the memory transfer is already complete and the migration + * has been cleaned up by the hypervisor, H_PARAMETER is return, + * which is translate in EINVAL by poll_vasi_state(). + */ + if (ret == -EINVAL || (!ret && state == H_VASI_COMPLETED)) { + pr_info("memory transfer completed.\n"); + break; + } + + if (ret) { + pr_err("H_VASI_STATE return error (%d)\n", ret); + break; + } + + if (state != H_VASI_RESUMED) { + pr_err("unexpected H_VASI_STATE result %lu\n", state); + break; + } + + msleep(500); + } +} + static void prod_single(unsigned int target_cpu) { long hvrc; @@ -565,10 +627,13 @@ retry: prod_others(); } /* - * Execution may have been suspended for several seconds, so - * reset the watchdog. + * Execution may have been suspended for several seconds, so reset + * the watchdogs. touch_nmi_watchdog() also touches the soft lockup + * watchdog. */ + rcu_cpu_stall_reset(); touch_nmi_watchdog(); + return ret; } @@ -665,19 +730,45 @@ static int pseries_suspend(u64 handle) static int pseries_migrate_partition(u64 handle) { int ret; + unsigned int factor = 0; + +#ifdef CONFIG_PPC_WATCHDOG + factor = nmi_wd_lpm_factor; +#endif + /* + * When the migration is initiated, the hypervisor changes VAS + * mappings to prepare before OS gets the notification and + * closes all VAS windows. NX generates continuous faults during + * this time and the user space can not differentiate these + * faults from the migration event. So reduce this time window + * by closing VAS windows at the beginning of this function. + */ + vas_migration_handler(VAS_SUSPEND); ret = wait_for_vasi_session_suspending(handle); if (ret) - return ret; + goto out; - vas_migration_handler(VAS_SUSPEND); + if (factor) + watchdog_hardlockup_set_timeout_pct(factor); ret = pseries_suspend(handle); - if (ret == 0) + if (ret == 0) { post_mobility_fixup(); - else + /* + * Wait until the memory transfer is complete, so that the user + * space process returns from the syscall after the transfer is + * complete. This allows the user hooks to be executed at the + * right time. 
+ */ + wait_for_vasi_session_completed(handle); + } else pseries_cancel_migration(handle, ret); + if (factor) + watchdog_hardlockup_set_timeout_pct(0); + +out: vas_migration_handler(VAS_RESUME); return ret; @@ -688,8 +779,8 @@ int rtas_syscall_dispatch_ibm_suspend_me(u64 handle) return pseries_migrate_partition(handle); } -static ssize_t migration_store(struct class *class, - struct class_attribute *attr, const char *buf, +static ssize_t migration_store(const struct class *class, + const struct class_attribute *attr, const char *buf, size_t count) { u64 streamid; diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index a3a71d37cb9a..ee1c8c6898a3 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -9,6 +9,7 @@ #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/msi.h> +#include <linux/seq_file.h> #include <asm/rtas.h> #include <asm/hw_irq.h> @@ -26,6 +27,7 @@ static int query_token, change_token; #define RTAS_CHANGE_MSI_FN 3 #define RTAS_CHANGE_MSIX_FN 4 #define RTAS_CHANGE_32MSI_FN 5 +#define RTAS_CHANGE_32MSIX_FN 6 /* RTAS Helpers */ @@ -41,7 +43,7 @@ static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs) seq_num = 1; do { if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN || - func == RTAS_CHANGE_32MSI_FN) + func == RTAS_CHANGE_32MSI_FN || func == RTAS_CHANGE_32MSIX_FN) rc = rtas_call(change_token, 6, 4, rtas_ret, addr, BUID_HI(buid), BUID_LO(buid), func, num_irqs, seq_num); @@ -406,8 +408,12 @@ again: if (use_32bit_msi_hack && rc > 0) rtas_hack_32bit_msi_gen2(pdev); - } else - rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec); + } else { + if (pdev->no_64bit_msi) + rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSIX_FN, nvec); + else + rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec); + } if (rc != nvec) { if (nvec != nvec_in) { @@ -447,21 +453,18 @@ static void pseries_msi_ops_msi_free(struct irq_domain *domain, * RTAS can not disable one MSI at a time. It's all or nothing. Do it * at the end after all IRQs have been freed. 
*/ -static void pseries_msi_domain_free_irqs(struct irq_domain *domain, - struct device *dev) +static void pseries_msi_post_free(struct irq_domain *domain, struct device *dev) { if (WARN_ON_ONCE(!dev_is_pci(dev))) return; - __msi_domain_free_irqs(domain, dev); - rtas_disable_msi(to_pci_dev(dev)); } static struct msi_domain_ops pseries_pci_msi_domain_ops = { .msi_prepare = pseries_msi_ops_prepare, .msi_free = pseries_msi_ops_msi_free, - .domain_free_irqs = pseries_msi_domain_free_irqs, + .msi_post_free = pseries_msi_post_free, }; static void pseries_msi_shutdown(struct irq_data *d) @@ -522,7 +525,12 @@ static struct msi_domain_info pseries_msi_domain_info = { static void pseries_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { - __pci_read_msi_msg(irq_data_get_msi_desc(data), msg); + struct pci_dev *dev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + + if (dev->current_state == PCI_D0) + __pci_read_msi_msg(irq_data_get_msi_desc(data), msg); + else + get_cached_msi_msg(data->irq, msg); } static struct irq_chip pseries_msi_irq_chip = { @@ -608,7 +616,7 @@ static const struct irq_domain_ops pseries_irq_domain_ops = { static int __pseries_msi_allocate_domains(struct pci_controller *phb, unsigned int count) { - struct irq_domain *parent = irq_get_default_host(); + struct irq_domain *parent = irq_get_default_domain(); phb->fwnode = irq_domain_alloc_named_id_fwnode("pSeries-MSI", phb->global_number); @@ -625,7 +633,7 @@ static int __pseries_msi_allocate_domains(struct pci_controller *phb, return -ENOMEM; } - phb->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(phb->dn), + phb->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(phb->dn), &pseries_msi_domain_info, phb->dev_domain); if (!phb->msi_domain) { @@ -682,8 +690,8 @@ static void rtas_msi_pci_irq_fixup(struct pci_dev *pdev) static int rtas_msi_init(void) { - query_token = rtas_token("ibm,query-interrupt-source-number"); - change_token = rtas_token("ibm,change-msi"); + query_token = rtas_function_token(RTAS_FN_IBM_QUERY_INTERRUPT_SOURCE_NUMBER); + change_token = rtas_function_token(RTAS_FN_IBM_CHANGE_MSI); if ((query_token == RTAS_UNKNOWN_SERVICE) || (change_token == RTAS_UNKNOWN_SERVICE)) { diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index cbf1720eb4aa..8130c37962c0 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -227,8 +227,8 @@ int __init pSeries_nvram_init(void) nvram_size = be32_to_cpup(nbytes_p); - nvram_fetch = rtas_token("nvram-fetch"); - nvram_store = rtas_token("nvram-store"); + nvram_fetch = rtas_function_token(RTAS_FN_NVRAM_FETCH); + nvram_store = rtas_function_token(RTAS_FN_NVRAM_STORE); printk(KERN_INFO "PPC64 nvram contains %d bytes\n", nvram_size); of_node_put(nvram); diff --git a/arch/powerpc/platforms/pseries/papr-indices.c b/arch/powerpc/platforms/pseries/papr-indices.c new file mode 100644 index 000000000000..3c7545591c45 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-indices.c @@ -0,0 +1,488 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-indices: " fmt + +#include <linux/build_bug.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/lockdep.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/string_helpers.h> +#include <linux/uaccess.h> +#include <asm/machdep.h> +#include <asm/rtas-work-area.h> +#include 
<asm/rtas.h> +#include <uapi/asm/papr-indices.h> +#include "papr-rtas-common.h" + +/* + * Function-specific return values for ibm,set-dynamic-indicator and + * ibm,get-dynamic-sensor-state RTAS calls. + * PAPR+ v2.13 7.3.18 and 7.3.19. + */ +#define RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR -3 + +/** + * struct rtas_get_indices_params - Parameters (in and out) for + * ibm,get-indices. + * @is_sensor: In: Caller-provided whether sensor or indicator. + * @indice_type:In: Caller-provided indice (sensor or indicator) token + * @work_area: In: Caller-provided work area buffer for results. + * @next: In: Sequence number. Out: Next sequence number. + * @status: Out: RTAS call status. + */ +struct rtas_get_indices_params { + u8 is_sensor; + u32 indice_type; + struct rtas_work_area *work_area; + u32 next; + s32 status; +}; + +/* + * rtas_ibm_get_indices() - Call ibm,get-indices to fill a work area buffer. + * @params: See &struct rtas_ibm_get_indices_params. + * + * Calls ibm,get-indices until it errors or successfully deposits data + * into the supplied work area. Handles RTAS retry statuses. Maps RTAS + * error statuses to reasonable errno values. + * + * The caller is expected to invoke rtas_ibm_get_indices() multiple times + * to retrieve all indices data for the provided indice type. Only one + * sequence should be in progress at any time; starting a new sequence + * will disrupt any sequence already in progress. Serialization of + * indices retrieval sequences is the responsibility of the caller. + * + * The caller should inspect @params.status to determine whether more + * calls are needed to complete the sequence. + * + * Context: May sleep. + * Return: -ve on error, 0 otherwise. + */ +static int rtas_ibm_get_indices(struct rtas_get_indices_params *params) +{ + struct rtas_work_area *work_area = params->work_area; + const s32 token = rtas_function_token(RTAS_FN_IBM_GET_INDICES); + u32 rets; + s32 fwrc; + int ret; + + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + lockdep_assert_held(&rtas_ibm_get_indices_lock); + + do { + fwrc = rtas_call(token, 5, 2, &rets, params->is_sensor, + params->indice_type, + rtas_work_area_phys(work_area), + rtas_work_area_size(work_area), + params->next); + } while (rtas_busy_delay(fwrc)); + + switch (fwrc) { + case RTAS_HARDWARE_ERROR: + ret = -EIO; + break; + case RTAS_INVALID_PARAMETER: /* Indicator type is not supported */ + ret = -EINVAL; + break; + case RTAS_SEQ_START_OVER: + ret = -EAGAIN; + pr_info_ratelimited("Indices changed during retrieval, retrying\n"); + params->next = 1; + break; + case RTAS_SEQ_MORE_DATA: + params->next = rets; + ret = 0; + break; + case RTAS_SEQ_COMPLETE: + params->next = 0; + ret = 0; + break; + default: + ret = -EIO; + pr_err_ratelimited("unexpected ibm,get-indices status %d\n", fwrc); + break; + } + + params->status = fwrc; + return ret; +} + +/* + * Internal indices sequence APIs. A sequence is a series of calls to + * ibm,get-indices for a given location code. The sequence ends when + * an error is encountered or all indices for the input has been + * returned. + */ + +/* + * indices_sequence_begin() - Begin a indices retrieval sequence. + * + * Context: May sleep. + */ +static void indices_sequence_begin(struct papr_rtas_sequence *seq) +{ + struct rtas_get_indices_params *param; + + param = (struct rtas_get_indices_params *)seq->params; + /* + * We could allocate the work area before acquiring the + * function lock, but that would allow concurrent requests to + * exhaust the limited work area pool for no benefit. 
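rtas_ibm_get_indices() above drives a multi-call sequence: start with a sequence number of 1, continue with whatever number firmware hands back while it reports more data, restart from 1 if the data changed underneath, and stop on completion or error. A stand-alone model of just that control flow; the status values and the fake firmware below are placeholders, not the PAPR-defined ones:

#include <stdio.h>

enum seq_status { SEQ_COMPLETE, SEQ_MORE_DATA, SEQ_START_OVER, SEQ_ERROR };

static int calls;

/* Fake firmware: deposits a couple of chunks, then reports completion. */
static enum seq_status fake_get_indices(unsigned int next, unsigned int *next_out)
{
        if (++calls >= 3)
                return SEQ_COMPLETE;
        *next_out = next + 1;
        return SEQ_MORE_DATA;
}

int main(void)
{
        unsigned int next = 1;  /* a sequence always starts at 1 */

        for (;;) {
                unsigned int next_out = 0;
                enum seq_status st = fake_get_indices(next, &next_out);

                printf("call with next=%u -> status %d\n", next, st);

                if (st == SEQ_MORE_DATA)
                        next = next_out;        /* continue where firmware left off */
                else if (st == SEQ_START_OVER)
                        next = 1;               /* data changed underneath us: restart */
                else
                        break;                  /* complete or error ends the sequence */
        }
        return 0;
}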
So + * allocate the work area under the lock. + */ + mutex_lock(&rtas_ibm_get_indices_lock); + param->work_area = rtas_work_area_alloc(RTAS_GET_INDICES_BUF_SIZE); + param->next = 1; + param->status = 0; +} + +/* + * indices_sequence_end() - Finalize a indices retrieval sequence. + * + * Releases resources obtained by indices_sequence_begin(). + */ +static void indices_sequence_end(struct papr_rtas_sequence *seq) +{ + struct rtas_get_indices_params *param; + + param = (struct rtas_get_indices_params *)seq->params; + rtas_work_area_free(param->work_area); + mutex_unlock(&rtas_ibm_get_indices_lock); +} + +/* + * Work function to be passed to papr_rtas_blob_generate(). + * + * ibm,get-indices RTAS call fills the work area with the certain + * format but does not return the bytes written in the buffer. So + * instead of kernel parsing this work area to determine the buffer + * length, copy the complete work area (RTAS_GET_INDICES_BUF_SIZE) + * to the blob and let the user space to obtain the data. + * Means RTAS_GET_INDICES_BUF_SIZE data will be returned for each + * read(). + */ + +static const char *indices_sequence_fill_work_area(struct papr_rtas_sequence *seq, + size_t *len) +{ + struct rtas_get_indices_params *p; + bool init_state; + + p = (struct rtas_get_indices_params *)seq->params; + init_state = (p->next == 1) ? true : false; + + if (papr_rtas_sequence_should_stop(seq, p->status, init_state)) + return NULL; + if (papr_rtas_sequence_set_err(seq, rtas_ibm_get_indices(p))) + return NULL; + + *len = RTAS_GET_INDICES_BUF_SIZE; + return rtas_work_area_raw_buf(p->work_area); +} + +/* + * papr_indices_handle_read - returns indices blob data to the user space + * + * ibm,get-indices RTAS call fills the work area with the certian + * format but does not return the bytes written in the buffer and + * copied RTAS_GET_INDICES_BUF_SIZE data to the blob for each RTAS + * call. So send RTAS_GET_INDICES_BUF_SIZE buffer to the user space + * for each read(). + */ +static ssize_t papr_indices_handle_read(struct file *file, + char __user *buf, size_t size, loff_t *off) +{ + const struct papr_rtas_blob *blob = file->private_data; + + /* we should not instantiate a handle without any data attached. */ + if (!papr_rtas_blob_has_data(blob)) { + pr_err_once("handle without data\n"); + return -EIO; + } + + if (size < RTAS_GET_INDICES_BUF_SIZE) { + pr_err_once("Invalid buffer length %ld, expect %d\n", + size, RTAS_GET_INDICES_BUF_SIZE); + return -EINVAL; + } else if (size > RTAS_GET_INDICES_BUF_SIZE) + size = RTAS_GET_INDICES_BUF_SIZE; + + return simple_read_from_buffer(buf, size, off, blob->data, blob->len); +} + +static const struct file_operations papr_indices_handle_ops = { + .read = papr_indices_handle_read, + .llseek = papr_rtas_common_handle_seek, + .release = papr_rtas_common_handle_release, +}; + +/* + * papr_indices_create_handle() - Create a fd-based handle for reading + * indices data + * @ubuf: Input parameters to RTAS call such as whether sensor or indicator + * and indice type in user memory + * + * Handler for PAPR_INDICES_IOC_GET ioctl command. Validates @ubuf + * and instantiates an immutable indices "blob" for it. The blob is + * attached to a file descriptor for reading by user space. The memory + * backing the blob is freed when the file is released. + * + * The entire requested indices is retrieved by this call and all + * necessary RTAS interactions are performed before returning the fd + * to user space. 
This keeps the read handler simple and ensures that + * the kernel can prevent interleaving of ibm,get-indices call sequences. + * + * Return: The installed fd number if successful, -ve errno otherwise. + */ +static long papr_indices_create_handle(struct papr_indices_io_block __user *ubuf) +{ + struct papr_rtas_sequence seq = {}; + struct rtas_get_indices_params params = {}; + int fd; + + if (get_user(params.is_sensor, &ubuf->indices.is_sensor)) + return -EFAULT; + + if (get_user(params.indice_type, &ubuf->indices.indice_type)) + return -EFAULT; + + seq = (struct papr_rtas_sequence) { + .begin = indices_sequence_begin, + .end = indices_sequence_end, + .work = indices_sequence_fill_work_area, + }; + + seq.params = ¶ms; + fd = papr_rtas_setup_file_interface(&seq, + &papr_indices_handle_ops, "[papr-indices]"); + + return fd; +} + +/* + * Create work area with the input parameters. This function is used + * for both ibm,set-dynamic-indicator and ibm,get-dynamic-sensor-state + * RTAS Calls. + */ +static struct rtas_work_area * +papr_dynamic_indice_buf_from_user(struct papr_indices_io_block __user *ubuf, + struct papr_indices_io_block *kbuf) +{ + struct rtas_work_area *work_area; + u32 length; + __be32 len_be; + + if (copy_from_user(kbuf, ubuf, sizeof(*kbuf))) + return ERR_PTR(-EFAULT); + + + if (!string_is_terminated(kbuf->dynamic_param.location_code_str, + ARRAY_SIZE(kbuf->dynamic_param.location_code_str))) + return ERR_PTR(-EINVAL); + + /* + * The input data in the work area should be as follows: + * - 32-bit integer length of the location code string, + * including NULL. + * - Location code string, NULL terminated, identifying the + * token (sensor or indicator). + * PAPR 2.13 - R1–7.3.18–5 ibm,set-dynamic-indicator + * - R1–7.3.19–5 ibm,get-dynamic-sensor-state + */ + /* + * Length that user space passed should also include NULL + * terminator. + */ + length = strlen(kbuf->dynamic_param.location_code_str) + 1; + if (length > LOC_CODE_SIZE) + return ERR_PTR(-EINVAL); + + len_be = cpu_to_be32(length); + + work_area = rtas_work_area_alloc(LOC_CODE_SIZE + sizeof(u32)); + memcpy(rtas_work_area_raw_buf(work_area), &len_be, sizeof(u32)); + memcpy((rtas_work_area_raw_buf(work_area) + sizeof(u32)), + &kbuf->dynamic_param.location_code_str, length); + + return work_area; +} + +/** + * papr_dynamic_indicator_ioc_set - ibm,set-dynamic-indicator RTAS Call + * PAPR 2.13 7.3.18 + * + * @ubuf: Input parameters to RTAS call such as indicator token and + * new state. + * + * Returns success or -errno. 
+ */ +static long papr_dynamic_indicator_ioc_set(struct papr_indices_io_block __user *ubuf) +{ + struct papr_indices_io_block kbuf; + struct rtas_work_area *work_area; + s32 fwrc, token, ret; + + token = rtas_function_token(RTAS_FN_IBM_SET_DYNAMIC_INDICATOR); + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + mutex_lock(&rtas_ibm_set_dynamic_indicator_lock); + work_area = papr_dynamic_indice_buf_from_user(ubuf, &kbuf); + if (IS_ERR(work_area)) { + ret = PTR_ERR(work_area); + goto out; + } + + do { + fwrc = rtas_call(token, 3, 1, NULL, + kbuf.dynamic_param.token, + kbuf.dynamic_param.state, + rtas_work_area_phys(work_area)); + } while (rtas_busy_delay(fwrc)); + + rtas_work_area_free(work_area); + + switch (fwrc) { + case RTAS_SUCCESS: + ret = 0; + break; + case RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR: /* No such indicator */ + ret = -EOPNOTSUPP; + break; + default: + pr_err("unexpected ibm,set-dynamic-indicator result %d\n", + fwrc); + fallthrough; + case RTAS_HARDWARE_ERROR: /* Hardware/platform error */ + ret = -EIO; + break; + } + +out: + mutex_unlock(&rtas_ibm_set_dynamic_indicator_lock); + return ret; +} + +/** + * papr_dynamic_sensor_ioc_get - ibm,get-dynamic-sensor-state RTAS Call + * PAPR 2.13 7.3.19 + * + * @ubuf: Input parameters to RTAS call such as sensor token + * Copies the state in user space buffer. + * + * + * Returns success or -errno. + */ + +static long papr_dynamic_sensor_ioc_get(struct papr_indices_io_block __user *ubuf) +{ + struct papr_indices_io_block kbuf; + struct rtas_work_area *work_area; + s32 fwrc, token, ret; + u32 rets; + + token = rtas_function_token(RTAS_FN_IBM_GET_DYNAMIC_SENSOR_STATE); + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + mutex_lock(&rtas_ibm_get_dynamic_sensor_state_lock); + work_area = papr_dynamic_indice_buf_from_user(ubuf, &kbuf); + if (IS_ERR(work_area)) { + ret = PTR_ERR(work_area); + goto out; + } + + do { + fwrc = rtas_call(token, 2, 2, &rets, + kbuf.dynamic_param.token, + rtas_work_area_phys(work_area)); + } while (rtas_busy_delay(fwrc)); + + rtas_work_area_free(work_area); + + switch (fwrc) { + case RTAS_SUCCESS: + if (put_user(rets, &ubuf->dynamic_param.state)) + ret = -EFAULT; + else + ret = 0; + break; + case RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR: /* No such indicator */ + ret = -EOPNOTSUPP; + break; + default: + pr_err("unexpected ibm,get-dynamic-sensor result %d\n", + fwrc); + fallthrough; + case RTAS_HARDWARE_ERROR: /* Hardware/platform error */ + ret = -EIO; + break; + } + +out: + mutex_unlock(&rtas_ibm_get_dynamic_sensor_state_lock); + return ret; +} + +/* + * Top-level ioctl handler for /dev/papr-indices. 
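
For illustration only (not part of this patch): a minimal user-space sketch of the PAPR_INDICES_IOC_GET flow implemented above, assuming the names shown in this file (struct papr_indices_io_block, RTAS_GET_INDICES_BUF_SIZE, PAPR_INDICES_IOC_GET) are exported to user space by <asm/papr-indices.h>; error handling is largely elided.

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/papr-indices.h>

int dump_indices(uint8_t is_sensor, uint32_t indice_type)
{
	struct papr_indices_io_block block = { 0 };
	char buf[RTAS_GET_INDICES_BUF_SIZE];
	int devfd, fd;
	ssize_t n;

	devfd = open("/dev/papr-indices", O_RDONLY);
	if (devfd < 0)
		return -1;

	block.indices.is_sensor = is_sensor;
	block.indices.indice_type = indice_type;

	/* The kernel runs the whole ibm,get-indices sequence before
	 * this ioctl returns the read-only handle. */
	fd = ioctl(devfd, PAPR_INDICES_IOC_GET, &block);
	close(devfd);
	if (fd < 0)
		return -1;

	/* Each read() returns one RTAS_GET_INDICES_BUF_SIZE chunk. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return n < 0 ? -1 : 0;
}
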
+ */ +static long papr_indices_dev_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) +{ + void __user *argp = (__force void __user *)arg; + long ret; + + switch (ioctl) { + case PAPR_INDICES_IOC_GET: + ret = papr_indices_create_handle(argp); + break; + case PAPR_DYNAMIC_SENSOR_IOC_GET: + ret = papr_dynamic_sensor_ioc_get(argp); + break; + case PAPR_DYNAMIC_INDICATOR_IOC_SET: + if (filp->f_mode & FMODE_WRITE) + ret = papr_dynamic_indicator_ioc_set(argp); + else + ret = -EBADF; + break; + default: + ret = -ENOIOCTLCMD; + break; + } + + return ret; +} + +static const struct file_operations papr_indices_ops = { + .unlocked_ioctl = papr_indices_dev_ioctl, +}; + +static struct miscdevice papr_indices_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "papr-indices", + .fops = &papr_indices_ops, +}; + +static __init int papr_indices_init(void) +{ + if (!rtas_function_implemented(RTAS_FN_IBM_GET_INDICES)) + return -ENODEV; + + if (!rtas_function_implemented(RTAS_FN_IBM_SET_DYNAMIC_INDICATOR)) + return -ENODEV; + + if (!rtas_function_implemented(RTAS_FN_IBM_GET_DYNAMIC_SENSOR_STATE)) + return -ENODEV; + + return misc_register(&papr_indices_dev); +} +machine_device_initcall(pseries, papr_indices_init); diff --git a/arch/powerpc/platforms/pseries/papr-phy-attest.c b/arch/powerpc/platforms/pseries/papr-phy-attest.c new file mode 100644 index 000000000000..1907f2411567 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-phy-attest.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-phy-attest: " fmt + +#include <linux/build_bug.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/lockdep.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/string_helpers.h> +#include <linux/uaccess.h> +#include <asm/machdep.h> +#include <asm/rtas-work-area.h> +#include <asm/rtas.h> +#include <uapi/asm/papr-physical-attestation.h> +#include "papr-rtas-common.h" + +/** + * struct rtas_phy_attest_params - Parameters (in and out) for + * ibm,physical-attestation. + * + * @cmd: In: Caller-provided attestation command buffer. Must be + * RTAS-addressable. + * @work_area: In: Caller-provided work area buffer for attestation + * command structure + * Out: Caller-provided work area buffer for the response + * @cmd_len: In: Caller-provided attestation command structure + * length + * @sequence: In: Sequence number. Out: Next sequence number. + * @written: Out: Bytes written by ibm,physical-attestation to + * @work_area. + * @status: Out: RTAS call status. + */ +struct rtas_phy_attest_params { + struct papr_phy_attest_io_block cmd; + struct rtas_work_area *work_area; + u32 cmd_len; + u32 sequence; + u32 written; + s32 status; +}; + +/** + * rtas_physical_attestation() - Call ibm,physical-attestation to + * fill a work area buffer. + * @params: See &struct rtas_phy_attest_params. + * + * Calls ibm,physical-attestation until it errors or successfully + * deposits data into the supplied work area. Handles RTAS retry + * statuses. Maps RTAS error statuses to reasonable errno values. + * + * The caller is expected to invoke rtas_physical_attestation() + * multiple times to retrieve all the data for the provided + * attestation command. Only one sequence should be in progress at + * any time; starting a new sequence will disrupt any sequence + * already in progress. 
Serialization of attestation retrieval + * sequences is the responsibility of the caller. + * + * The caller should inspect @params.status to determine whether more + * calls are needed to complete the sequence. + * + * Context: May sleep. + * Return: -ve on error, 0 otherwise. + */ +static int rtas_physical_attestation(struct rtas_phy_attest_params *params) +{ + struct rtas_work_area *work_area; + s32 fwrc, token; + u32 rets[2]; + int ret; + + work_area = params->work_area; + token = rtas_function_token(RTAS_FN_IBM_PHYSICAL_ATTESTATION); + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + lockdep_assert_held(&rtas_ibm_physical_attestation_lock); + + do { + fwrc = rtas_call(token, 3, 3, rets, + rtas_work_area_phys(work_area), + params->cmd_len, + params->sequence); + } while (rtas_busy_delay(fwrc)); + + switch (fwrc) { + case RTAS_HARDWARE_ERROR: + ret = -EIO; + break; + case RTAS_INVALID_PARAMETER: + ret = -EINVAL; + break; + case RTAS_SEQ_MORE_DATA: + params->sequence = rets[0]; + fallthrough; + case RTAS_SEQ_COMPLETE: + params->written = rets[1]; + /* + * Kernel or firmware bug, do not continue. + */ + if (WARN(params->written > rtas_work_area_size(work_area), + "possible write beyond end of work area")) + ret = -EFAULT; + else + ret = 0; + break; + default: + ret = -EIO; + pr_err_ratelimited("unexpected ibm,get-phy_attest status %d\n", fwrc); + break; + } + + params->status = fwrc; + return ret; +} + +/* + * Internal physical-attestation sequence APIs. A physical-attestation + * sequence is a series of calls to get ibm,physical-attestation + * for a given attestation command. The sequence ends when an error + * is encountered or all data for the attestation command has been + * returned. + */ + +/** + * phy_attest_sequence_begin() - Begin a response data for attestation + * command retrieval sequence. + * @seq: user specified parameters for RTAS call from seq struct. + * + * Context: May sleep. + */ +static void phy_attest_sequence_begin(struct papr_rtas_sequence *seq) +{ + struct rtas_phy_attest_params *param; + + /* + * We could allocate the work area before acquiring the + * function lock, but that would allow concurrent requests to + * exhaust the limited work area pool for no benefit. So + * allocate the work area under the lock. + */ + mutex_lock(&rtas_ibm_physical_attestation_lock); + param = (struct rtas_phy_attest_params *)seq->params; + param->work_area = rtas_work_area_alloc(SZ_4K); + memcpy(rtas_work_area_raw_buf(param->work_area), ¶m->cmd, + param->cmd_len); + param->sequence = 1; + param->status = 0; +} + +/** + * phy_attest_sequence_end() - Finalize a attestation command + * response retrieval sequence. + * @seq: Sequence state. + * + * Releases resources obtained by phy_attest_sequence_begin(). + */ +static void phy_attest_sequence_end(struct papr_rtas_sequence *seq) +{ + struct rtas_phy_attest_params *param; + + param = (struct rtas_phy_attest_params *)seq->params; + rtas_work_area_free(param->work_area); + mutex_unlock(&rtas_ibm_physical_attestation_lock); + kfree(param); +} + +/* + * Generator function to be passed to papr_rtas_blob_generate(). + */ +static const char *phy_attest_sequence_fill_work_area(struct papr_rtas_sequence *seq, + size_t *len) +{ + struct rtas_phy_attest_params *p; + bool init_state; + + p = (struct rtas_phy_attest_params *)seq->params; + init_state = (p->written == 0) ? 
true : false; + + if (papr_rtas_sequence_should_stop(seq, p->status, init_state)) + return NULL; + if (papr_rtas_sequence_set_err(seq, rtas_physical_attestation(p))) + return NULL; + *len = p->written; + return rtas_work_area_raw_buf(p->work_area); +} + +static const struct file_operations papr_phy_attest_handle_ops = { + .read = papr_rtas_common_handle_read, + .llseek = papr_rtas_common_handle_seek, + .release = papr_rtas_common_handle_release, +}; + +/** + * papr_phy_attest_create_handle() - Create a fd-based handle for + * reading the response for the given attestation command. + * @ulc: Attestation command in user memory; defines the scope of + * data for the attestation command to retrieve. + * + * Handler for PAPR_PHYSICAL_ATTESTATION_IOC_CREATE_HANDLE ioctl + * command. Validates @ulc and instantiates an immutable response + * "blob" for attestation command. The blob is attached to a file + * descriptor for reading by user space. The memory backing the blob + * is freed when the file is released. + * + * The entire requested response buffer for the attestation command + * retrieved by this call and all necessary RTAS interactions are + * performed before returning the fd to user space. This keeps the + * read handler simple and ensures that kernel can prevent + * interleaving ibm,physical-attestation call sequences. + * + * Return: The installed fd number if successful, -ve errno otherwise. + */ +static long papr_phy_attest_create_handle(struct papr_phy_attest_io_block __user *ulc) +{ + struct rtas_phy_attest_params *params; + struct papr_rtas_sequence seq = {}; + int fd; + + /* + * Freed in phy_attest_sequence_end(). + */ + params = kzalloc(sizeof(*params), GFP_KERNEL_ACCOUNT); + if (!params) + return -ENOMEM; + + if (copy_from_user(¶ms->cmd, ulc, + sizeof(struct papr_phy_attest_io_block))) + return -EFAULT; + + params->cmd_len = be32_to_cpu(params->cmd.length); + seq = (struct papr_rtas_sequence) { + .begin = phy_attest_sequence_begin, + .end = phy_attest_sequence_end, + .work = phy_attest_sequence_fill_work_area, + }; + + seq.params = (void *)params; + + fd = papr_rtas_setup_file_interface(&seq, + &papr_phy_attest_handle_ops, + "[papr-physical-attestation]"); + + return fd; +} + +/* + * Top-level ioctl handler for /dev/papr-physical-attestation. 
+ */
+static long papr_phy_attest_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+	void __user *argp = (__force void __user *)arg;
+	long ret;
+
+	switch (ioctl) {
+	case PAPR_PHY_ATTEST_IOC_HANDLE:
+		ret = papr_phy_attest_create_handle(argp);
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	return ret;
+}
+
+static const struct file_operations papr_phy_attest_ops = {
+	.unlocked_ioctl = papr_phy_attest_dev_ioctl,
+};
+
+static struct miscdevice papr_phy_attest_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "papr-physical-attestation",
+	.fops = &papr_phy_attest_ops,
+};
+
+static __init int papr_phy_attest_init(void)
+{
+	if (!rtas_function_implemented(RTAS_FN_IBM_PHYSICAL_ATTESTATION))
+		return -ENODEV;
+
+	return misc_register(&papr_phy_attest_dev);
+}
+machine_device_initcall(pseries, papr_phy_attest_init);
diff --git a/arch/powerpc/platforms/pseries/papr-platform-dump.c b/arch/powerpc/platforms/pseries/papr-platform-dump.c
new file mode 100644
index 000000000000..f8d55eccdb6b
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr-platform-dump.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "papr-platform-dump: " fmt
+
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <asm/machdep.h>
+#include <asm/rtas-work-area.h>
+#include <asm/rtas.h>
+#include <uapi/asm/papr-platform-dump.h>
+
+/*
+ * Function-specific return values for ibm,platform-dump, derived from
+ * PAPR+ v2.13 7.3.3.4.1 "ibm,platform-dump RTAS Call".
+ */
+#define RTAS_IBM_PLATFORM_DUMP_COMPLETE 0 /* Complete dump retrieved. */
+#define RTAS_IBM_PLATFORM_DUMP_CONTINUE 1 /* Continue dump */
+#define RTAS_NOT_AUTHORIZED -9002 /* Not Authorized */
+
+#define RTAS_IBM_PLATFORM_DUMP_START 2 /* Linux status to start dump */
+
+/**
+ * struct ibm_platform_dump_params - Parameters (in and out) for
+ *					ibm,platform-dump
+ * @work_area:	In: work area buffer for results.
+ * @buf_length:	In: work area buffer length in bytes
+ * @dump_tag_hi: In: Most-significant 32 bits of a Dump_Tag representing
+ *		an id of the dump being processed.
+ * @dump_tag_lo: In: Least-significant 32 bits of a Dump_Tag representing
+ *		an id of the dump being processed.
+ * @sequence_hi: In: Sequence number in most-significant 32 bits.
+ *		Out: Next sequence number in most-significant 32 bits.
+ * @sequence_lo: In: Sequence number in least-significant 32 bits.
+ *		Out: Next sequence number in least-significant 32 bits.
+ * @bytes_ret_hi: Out: Bytes written in most-significant 32 bits.
+ * @bytes_ret_lo: Out: Bytes written in least-significant 32 bits.
+ * @status:	Out: RTAS call status.
+ * @list:	Node in the list of dumps in progress. Multiple dumps
+ *		with different dump IDs can be retrieved at the same
+ *		time, but not with the same dump ID. This list is used
+ *		to determine whether a dump for the same ID is already
+ *		in progress.
+ */
+struct ibm_platform_dump_params {
+	struct rtas_work_area *work_area;
+	u32 buf_length;
+	u32 dump_tag_hi;
+	u32 dump_tag_lo;
+	u32 sequence_hi;
+	u32 sequence_lo;
+	u32 bytes_ret_hi;
+	u32 bytes_ret_lo;
+	s32 status;
+	struct list_head list;
+};
+
+/*
+ * Multiple dumps with different dump IDs can be retrieved at the same
+ * time, but not with the same dump ID. platform_dump_list_mutex and
+ * platform_dump_list are used to prevent duplicate requests for the
+ * same dump ID.
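
For illustration only (not part of this patch): a minimal user-space sketch of the create/read/invalidate flow provided by this file, assuming the two ioctl commands used below are exported by <asm/papr-platform-dump.h>; error handling is largely elided.

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/papr-platform-dump.h>

int save_platform_dump(uint64_t dump_tag, FILE *out)
{
	char buf[4096];		/* each read length must be at least 1024 */
	int devfd, fd;
	ssize_t n;

	devfd = open("/dev/papr-platform-dump", O_RDONLY);
	if (devfd < 0)
		return -1;

	fd = ioctl(devfd, PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE, &dump_tag);
	close(devfd);
	if (fd < 0)
		return -1;

	/* Each read() triggers further ibm,platform-dump calls; a return
	 * of 0 means the hypervisor reported the dump complete. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, out);

	/* Only after the complete dump has been saved: ask the kernel to
	 * issue the final call so the hypervisor can free the dump. */
	if (n == 0)
		ioctl(fd, PAPR_PLATFORM_DUMP_IOC_INVALIDATE, &dump_tag);

	close(fd);
	return n < 0 ? -1 : 0;
}
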
+ */ +static DEFINE_MUTEX(platform_dump_list_mutex); +static LIST_HEAD(platform_dump_list); + +/** + * rtas_ibm_platform_dump() - Call ibm,platform-dump to fill a work area + * buffer. + * @params: See &struct ibm_platform_dump_params. + * @buf_addr: Address of dump buffer (work_area) + * @buf_length: Length of the buffer in bytes (min. 1024) + * + * Calls ibm,platform-dump until it errors or successfully deposits data + * into the supplied work area. Handles RTAS retry statuses. Maps RTAS + * error statuses to reasonable errno values. + * + * Can request multiple dumps with different dump IDs at the same time, + * but not with the same dump ID which is prevented with the check in + * the ioctl code (papr_platform_dump_create_handle()). + * + * The caller should inspect @params.status to determine whether more + * calls are needed to complete the sequence. + * + * Context: May sleep. + * Return: -ve on error, 0 for dump complete and 1 for continue dump + */ +static int rtas_ibm_platform_dump(struct ibm_platform_dump_params *params, + phys_addr_t buf_addr, u32 buf_length) +{ + u32 rets[4]; + s32 fwrc; + int ret = 0; + + do { + fwrc = rtas_call(rtas_function_token(RTAS_FN_IBM_PLATFORM_DUMP), + 6, 5, + rets, + params->dump_tag_hi, + params->dump_tag_lo, + params->sequence_hi, + params->sequence_lo, + buf_addr, + buf_length); + } while (rtas_busy_delay(fwrc)); + + switch (fwrc) { + case RTAS_HARDWARE_ERROR: + ret = -EIO; + break; + case RTAS_NOT_AUTHORIZED: + ret = -EPERM; + break; + case RTAS_IBM_PLATFORM_DUMP_CONTINUE: + case RTAS_IBM_PLATFORM_DUMP_COMPLETE: + params->sequence_hi = rets[0]; + params->sequence_lo = rets[1]; + params->bytes_ret_hi = rets[2]; + params->bytes_ret_lo = rets[3]; + break; + default: + ret = -EIO; + pr_err_ratelimited("unexpected ibm,platform-dump status %d\n", + fwrc); + break; + } + + params->status = fwrc; + return ret; +} + +/* + * Platform dump is used with multiple RTAS calls to retrieve the + * complete dump for the provided dump ID. Once the complete dump is + * retrieved, the hypervisor returns dump complete status (0) for the + * last RTAS call and expects the caller issues one more call with + * NULL buffer to invalidate the dump so that the hypervisor can remove + * the dump. + * + * After the specific dump is invalidated in the hypervisor, expect the + * dump complete status for the new sequence - the user space initiates + * new request for the same dump ID. + */ +static ssize_t papr_platform_dump_handle_read(struct file *file, + char __user *buf, size_t size, loff_t *off) +{ + struct ibm_platform_dump_params *params = file->private_data; + u64 total_bytes; + s32 fwrc; + + /* + * Dump already completed with the previous read calls. + * In case if the user space issues further reads, returns + * -EINVAL. + */ + if (!params->buf_length) { + pr_warn_once("Platform dump completed for dump ID %llu\n", + (u64) (((u64)params->dump_tag_hi << 32) | + params->dump_tag_lo)); + return -EINVAL; + } + + /* + * The hypervisor returns status 0 if no more data available to + * download. The dump will be invalidated with ioctl (see below). + */ + if (params->status == RTAS_IBM_PLATFORM_DUMP_COMPLETE) { + params->buf_length = 0; + /* + * Returns 0 to the user space so that user + * space read stops. + */ + return 0; + } + + if (size < SZ_1K) { + pr_err_once("Buffer length should be minimum 1024 bytes\n"); + return -EINVAL; + } else if (size > params->buf_length) { + /* + * Allocate 4K work area. So if the user requests > 4K, + * resize the buffer length. 
+		 */
+		size = params->buf_length;
+	}
+
+	fwrc = rtas_ibm_platform_dump(params,
+				rtas_work_area_phys(params->work_area),
+				size);
+	if (fwrc < 0)
+		return fwrc;
+
+	total_bytes = (u64) (((u64)params->bytes_ret_hi << 32) |
+			params->bytes_ret_lo);
+
+	/*
+	 * Kernel or firmware bug, do not continue.
+	 */
+	if (WARN(total_bytes > size, "possible write beyond end of work area"))
+		return -EFAULT;
+
+	if (copy_to_user(buf, rtas_work_area_raw_buf(params->work_area),
+			total_bytes))
+		return -EFAULT;
+
+	return total_bytes;
+}
+
+static int papr_platform_dump_handle_release(struct inode *inode,
+					struct file *file)
+{
+	struct ibm_platform_dump_params *params = file->private_data;
+
+	if (params->work_area)
+		rtas_work_area_free(params->work_area);
+
+	mutex_lock(&platform_dump_list_mutex);
+	list_del(&params->list);
+	mutex_unlock(&platform_dump_list_mutex);
+
+	kfree(params);
+	file->private_data = NULL;
+	return 0;
+}
+
+/*
+ * This ioctl is used to invalidate the dump; user space is expected to
+ * issue it after obtaining the complete dump.
+ * Issue the last RTAS call with a NULL buffer to invalidate the dump,
+ * which means the dump will be freed in the hypervisor.
+ */
+static long papr_platform_dump_invalidate_ioctl(struct file *file,
+				unsigned int ioctl, unsigned long arg)
+{
+	struct ibm_platform_dump_params *params;
+	u64 __user *argp = (void __user *)arg;
+	u64 param_dump_tag, dump_tag;
+
+	if (ioctl != PAPR_PLATFORM_DUMP_IOC_INVALIDATE)
+		return -ENOIOCTLCMD;
+
+	if (get_user(dump_tag, argp))
+		return -EFAULT;
+
+	/*
+	 * private_data is freed during release(), so this should not
+	 * happen.
+	 */
+	if (!file->private_data) {
+		pr_err("No valid FD to invalidate dump for the ID(%llu)\n",
+			dump_tag);
+		return -EINVAL;
+	}
+
+	params = file->private_data;
+	param_dump_tag = (u64) (((u64)params->dump_tag_hi << 32) |
+				params->dump_tag_lo);
+	if (dump_tag != param_dump_tag) {
+		pr_err("Invalid dump ID(%llu) to invalidate dump\n",
+			dump_tag);
+		return -EINVAL;
+	}
+
+	if (params->status != RTAS_IBM_PLATFORM_DUMP_COMPLETE) {
+		pr_err("Platform dump is not complete, but requested "
+			"to invalidate dump for ID(%llu)\n",
+			dump_tag);
+		return -EINPROGRESS;
+	}
+
+	return rtas_ibm_platform_dump(params, 0, 0);
+}
+
+static const struct file_operations papr_platform_dump_handle_ops = {
+	.read = papr_platform_dump_handle_read,
+	.release = papr_platform_dump_handle_release,
+	.unlocked_ioctl = papr_platform_dump_invalidate_ioctl,
+};
+
+/**
+ * papr_platform_dump_create_handle() - Create a fd-based handle for
+ * reading platform dump
+ *
+ * Handler for the PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE ioctl command.
+ * Allocates the RTAS parameter struct and work area and attaches them
+ * to the file descriptor, which user space reads with multiple RTAS
+ * calls until the dump is complete. This memory allocation is freed
+ * when the file is released.
+ *
+ * Multiple dump requests with different IDs are allowed at the same
+ * time, but not with the same dump ID. So if user space has already
+ * opened a file descriptor for the specific dump ID, return -EALREADY
+ * for the next request.
+ *
+ * @dump_tag: Dump ID for the dump requested to retrieve from the
+ *		hypervisor
+ *
+ * Return: The installed fd number if successful, -ve errno otherwise.
+ */
+static long papr_platform_dump_create_handle(u64 dump_tag)
+{
+	struct ibm_platform_dump_params *params;
+	u64 param_dump_tag;
+	struct file *file;
+	long err;
+	int fd;
+
+	/*
+	 * Return failure if user space already has an open FD for
+	 * the specific dump ID.
This check will prevent multiple dump + * requests for the same dump ID at the same time. Generally + * should not expect this, but in case. + */ + list_for_each_entry(params, &platform_dump_list, list) { + param_dump_tag = (u64) (((u64)params->dump_tag_hi << 32) | + params->dump_tag_lo); + if (dump_tag == param_dump_tag) { + pr_err("Platform dump for ID(%llu) is already in progress\n", + dump_tag); + return -EALREADY; + } + } + + params = kzalloc(sizeof(struct ibm_platform_dump_params), + GFP_KERNEL_ACCOUNT); + if (!params) + return -ENOMEM; + + params->work_area = rtas_work_area_alloc(SZ_4K); + params->buf_length = SZ_4K; + params->dump_tag_hi = (u32)(dump_tag >> 32); + params->dump_tag_lo = (u32)(dump_tag & 0x00000000ffffffffULL); + params->status = RTAS_IBM_PLATFORM_DUMP_START; + + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); + if (fd < 0) { + err = fd; + goto free_area; + } + + file = anon_inode_getfile_fmode("[papr-platform-dump]", + &papr_platform_dump_handle_ops, + (void *)params, O_RDONLY, + FMODE_LSEEK | FMODE_PREAD); + if (IS_ERR(file)) { + err = PTR_ERR(file); + goto put_fd; + } + + fd_install(fd, file); + + list_add(¶ms->list, &platform_dump_list); + + pr_info("%s (%d) initiated platform dump for dump tag %llu\n", + current->comm, current->pid, dump_tag); + return fd; +put_fd: + put_unused_fd(fd); +free_area: + rtas_work_area_free(params->work_area); + kfree(params); + return err; +} + +/* + * Top-level ioctl handler for /dev/papr-platform-dump. + */ +static long papr_platform_dump_dev_ioctl(struct file *filp, + unsigned int ioctl, + unsigned long arg) +{ + u64 __user *argp = (void __user *)arg; + u64 dump_tag; + long ret; + + if (get_user(dump_tag, argp)) + return -EFAULT; + + switch (ioctl) { + case PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE: + mutex_lock(&platform_dump_list_mutex); + ret = papr_platform_dump_create_handle(dump_tag); + mutex_unlock(&platform_dump_list_mutex); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + return ret; +} + +static const struct file_operations papr_platform_dump_ops = { + .unlocked_ioctl = papr_platform_dump_dev_ioctl, +}; + +static struct miscdevice papr_platform_dump_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "papr-platform-dump", + .fops = &papr_platform_dump_ops, +}; + +static __init int papr_platform_dump_init(void) +{ + if (!rtas_function_implemented(RTAS_FN_IBM_PLATFORM_DUMP)) + return -ENODEV; + + return misc_register(&papr_platform_dump_dev); +} +machine_device_initcall(pseries, papr_platform_dump_init); diff --git a/arch/powerpc/platforms/pseries/papr-rtas-common.c b/arch/powerpc/platforms/pseries/papr-rtas-common.c new file mode 100644 index 000000000000..33c606e3378a --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-rtas-common.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-common: " fmt + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/anon_inodes.h> +#include <linux/sched/signal.h> +#include "papr-rtas-common.h" + +/* + * Sequence based RTAS HCALL has to issue multiple times to retrieve + * complete data from the hypervisor. For some of these RTAS calls, + * the OS should not interleave calls with different input until the + * sequence is completed. So data is collected for these calls during + * ioctl handle and export to user space with read() handle. 
+ * This file provides common functions needed for such sequence based + * RTAS calls Ex: ibm,get-vpd and ibm,get-indices. + */ + +bool papr_rtas_blob_has_data(const struct papr_rtas_blob *blob) +{ + return blob->data && blob->len; +} + +void papr_rtas_blob_free(const struct papr_rtas_blob *blob) +{ + if (blob) { + kvfree(blob->data); + kfree(blob); + } +} + +/** + * papr_rtas_blob_extend() - Append data to a &struct papr_rtas_blob. + * @blob: The blob to extend. + * @data: The new data to append to @blob. + * @len: The length of @data. + * + * Context: May sleep. + * Return: -ENOMEM on allocation failure, 0 otherwise. + */ +static int papr_rtas_blob_extend(struct papr_rtas_blob *blob, + const char *data, size_t len) +{ + const size_t new_len = blob->len + len; + const size_t old_len = blob->len; + const char *old_ptr = blob->data; + char *new_ptr; + + new_ptr = kvrealloc(old_ptr, new_len, GFP_KERNEL_ACCOUNT); + if (!new_ptr) + return -ENOMEM; + + memcpy(&new_ptr[old_len], data, len); + blob->data = new_ptr; + blob->len = new_len; + return 0; +} + +/** + * papr_rtas_blob_generate() - Construct a new &struct papr_rtas_blob. + * @seq: work function of the caller that is called to obtain + * data with the caller RTAS call. + * + * The @work callback is invoked until it returns NULL. @seq is + * passed to @work in its first argument on each call. When + * @work returns data, it should store the data length in its + * second argument. + * + * Context: May sleep. + * Return: A completely populated &struct papr_rtas_blob, or NULL on error. + */ +static const struct papr_rtas_blob * +papr_rtas_blob_generate(struct papr_rtas_sequence *seq) +{ + struct papr_rtas_blob *blob; + const char *buf; + size_t len; + int err = 0; + + blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT); + if (!blob) + return NULL; + + if (!seq->work) + return ERR_PTR(-EINVAL); + + + while (err == 0 && (buf = seq->work(seq, &len))) + err = papr_rtas_blob_extend(blob, buf, len); + + if (err != 0 || !papr_rtas_blob_has_data(blob)) + goto free_blob; + + return blob; +free_blob: + papr_rtas_blob_free(blob); + return NULL; +} + +int papr_rtas_sequence_set_err(struct papr_rtas_sequence *seq, int err) +{ + /* Preserve the first error recorded. */ + if (seq->error == 0) + seq->error = err; + + return seq->error; +} + +/* + * Higher-level retrieval code below. These functions use the + * papr_rtas_blob_* and sequence_* APIs defined above to create fd-based + * handles for consumption by user space. + */ + +/** + * papr_rtas_run_sequence() - Run a single retrieval sequence. + * @seq: Functions of the caller to complete the sequence + * + * Context: May sleep. Holds a mutex and an RTAS work area for its + * duration. Typically performs multiple sleepable slab + * allocations. + * + * Return: A populated &struct papr_rtas_blob on success. Encoded error + * pointer otherwise. + */ +static const struct papr_rtas_blob *papr_rtas_run_sequence(struct papr_rtas_sequence *seq) +{ + const struct papr_rtas_blob *blob; + + if (seq->begin) + seq->begin(seq); + + blob = papr_rtas_blob_generate(seq); + if (!blob) + papr_rtas_sequence_set_err(seq, -ENOMEM); + + if (seq->end) + seq->end(seq); + + + if (seq->error) { + papr_rtas_blob_free(blob); + return ERR_PTR(seq->error); + } + + return blob; +} + +/** + * papr_rtas_retrieve() - Return the data blob that is exposed to + * user space. + * @seq: RTAS call specific functions to be invoked until the + * sequence is completed. 
+ * + * Run sequences against @param until a blob is successfully + * instantiated, or a hard error is encountered, or a fatal signal is + * pending. + * + * Context: May sleep. + * Return: A fully populated data blob when successful. Encoded error + * pointer otherwise. + */ +const struct papr_rtas_blob *papr_rtas_retrieve(struct papr_rtas_sequence *seq) +{ + const struct papr_rtas_blob *blob; + + /* + * EAGAIN means the sequence returns error with a -4 (data + * changed and need to start the sequence) status from RTAS calls + * and we should attempt a new sequence. PAPR+ (v2.13 R1–7.3.20–5 + * - ibm,get-vpd, R1–7.3.17–6 - ibm,get-indices) indicates that + * this should be a transient condition, not something that + * happens continuously. But we'll stop trying on a fatal signal. + */ + do { + blob = papr_rtas_run_sequence(seq); + if (!IS_ERR(blob)) /* Success. */ + break; + if (PTR_ERR(blob) != -EAGAIN) /* Hard error. */ + break; + cond_resched(); + } while (!fatal_signal_pending(current)); + + return blob; +} + +/** + * papr_rtas_setup_file_interface - Complete the sequence and obtain + * the data and export to user space with fd-based handles. Then the + * user spave gets the data with read() handle. + * @seq: RTAS call specific functions to get the data. + * @fops: RTAS call specific file operations such as read(). + * @name: RTAS call specific char device node. + * + * Return: FD handle for consumption by user space + */ +long papr_rtas_setup_file_interface(struct papr_rtas_sequence *seq, + const struct file_operations *fops, + char *name) +{ + const struct papr_rtas_blob *blob; + struct file *file; + long ret; + int fd; + + blob = papr_rtas_retrieve(seq); + if (IS_ERR(blob)) + return PTR_ERR(blob); + + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); + if (fd < 0) { + ret = fd; + goto free_blob; + } + + file = anon_inode_getfile_fmode(name, fops, (void *)blob, + O_RDONLY, FMODE_LSEEK | FMODE_PREAD); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + goto put_fd; + } + + fd_install(fd, file); + return fd; + +put_fd: + put_unused_fd(fd); +free_blob: + papr_rtas_blob_free(blob); + return ret; +} + +/* + * papr_rtas_sequence_should_stop() - Determine whether RTAS retrieval + * sequence should continue. + * + * Examines the sequence error state and outputs of the last call to + * the specific RTAS to determine whether the sequence in progress + * should continue or stop. + * + * Return: True if the sequence has encountered an error or if all data + * for this sequence has been retrieved. False otherwise. + */ +bool papr_rtas_sequence_should_stop(const struct papr_rtas_sequence *seq, + s32 status, bool init_state) +{ + bool done; + + if (seq->error) + return true; + + switch (status) { + case RTAS_SEQ_COMPLETE: + if (init_state) + done = false; /* Initial state. */ + else + done = true; /* All data consumed. */ + break; + case RTAS_SEQ_MORE_DATA: + done = false; /* More data available. */ + break; + default: + done = true; /* Error encountered. */ + break; + } + + return done; +} + +/* + * User space read to retrieve data for the corresponding RTAS call. + * papr_rtas_blob is filled with the data using the corresponding RTAS + * call sequence API. + */ +ssize_t papr_rtas_common_handle_read(struct file *file, + char __user *buf, size_t size, loff_t *off) +{ + const struct papr_rtas_blob *blob = file->private_data; + + /* We should not instantiate a handle without any data attached. 
*/ + if (!papr_rtas_blob_has_data(blob)) { + pr_err_once("handle without data\n"); + return -EIO; + } + + return simple_read_from_buffer(buf, size, off, blob->data, blob->len); +} + +int papr_rtas_common_handle_release(struct inode *inode, + struct file *file) +{ + const struct papr_rtas_blob *blob = file->private_data; + + papr_rtas_blob_free(blob); + + return 0; +} + +loff_t papr_rtas_common_handle_seek(struct file *file, loff_t off, + int whence) +{ + const struct papr_rtas_blob *blob = file->private_data; + + return fixed_size_llseek(file, off, whence, blob->len); +} diff --git a/arch/powerpc/platforms/pseries/papr-rtas-common.h b/arch/powerpc/platforms/pseries/papr-rtas-common.h new file mode 100644 index 000000000000..4ceabcaf4905 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-rtas-common.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_POWERPC_PAPR_RTAS_COMMON_H +#define _ASM_POWERPC_PAPR_RTAS_COMMON_H + +#include <linux/types.h> + +/* + * Return codes for sequence based RTAS calls. + * Not listed under PAPR+ v2.13 7.2.8: "Return Codes". + * But defined in the specific section of each RTAS call. + */ +#define RTAS_SEQ_COMPLETE 0 /* All data has been retrieved. */ +#define RTAS_SEQ_MORE_DATA 1 /* More data is available */ +#define RTAS_SEQ_START_OVER -4 /* Data changed, restart call sequence. */ + +/* + * Internal "blob" APIs for accumulating RTAS call results into + * an immutable buffer to be attached to a file descriptor. + */ +struct papr_rtas_blob { + const char *data; + size_t len; +}; + +/** + * struct papr_sequence - State for managing a sequence of RTAS calls. + * @error: Shall be zero as long as the sequence has not encountered an error, + * -ve errno otherwise. Use papr_rtas_sequence_set_err() to update. + * @params: Parameter block to pass to rtas_*() calls. + * @begin: Work area allocation and initialize the needed parameter + * values passed to RTAS call + * @end: Free the allocated work area + * @work: Obtain data with RTAS call and invoke it until the sequence is + * completed. 
+ * + */ +struct papr_rtas_sequence { + int error; + void *params; + void (*begin)(struct papr_rtas_sequence *seq); + void (*end)(struct papr_rtas_sequence *seq); + const char *(*work)(struct papr_rtas_sequence *seq, size_t *len); +}; + +extern bool papr_rtas_blob_has_data(const struct papr_rtas_blob *blob); +extern void papr_rtas_blob_free(const struct papr_rtas_blob *blob); +extern int papr_rtas_sequence_set_err(struct papr_rtas_sequence *seq, + int err); +extern const struct papr_rtas_blob *papr_rtas_retrieve(struct papr_rtas_sequence *seq); +extern long papr_rtas_setup_file_interface(struct papr_rtas_sequence *seq, + const struct file_operations *fops, char *name); +extern bool papr_rtas_sequence_should_stop(const struct papr_rtas_sequence *seq, + s32 status, bool init_state); +extern ssize_t papr_rtas_common_handle_read(struct file *file, + char __user *buf, size_t size, loff_t *off); +extern int papr_rtas_common_handle_release(struct inode *inode, + struct file *file); +extern loff_t papr_rtas_common_handle_seek(struct file *file, loff_t off, + int whence); +#endif /* _ASM_POWERPC_PAPR_RTAS_COMMON_H */ + diff --git a/arch/powerpc/platforms/pseries/papr-sysparm.c b/arch/powerpc/platforms/pseries/papr-sysparm.c new file mode 100644 index 000000000000..7063ce8884e4 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-sysparm.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-sysparm: " fmt + +#include <linux/anon_inodes.h> +#include <linux/bug.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <asm/machdep.h> +#include <asm/papr-sysparm.h> +#include <asm/rtas-work-area.h> +#include <asm/rtas.h> + +struct papr_sysparm_buf *papr_sysparm_buf_alloc(void) +{ + struct papr_sysparm_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL); + + return buf; +} + +void papr_sysparm_buf_free(struct papr_sysparm_buf *buf) +{ + kfree(buf); +} + +static size_t papr_sysparm_buf_get_length(const struct papr_sysparm_buf *buf) +{ + return be16_to_cpu(buf->len); +} + +static void papr_sysparm_buf_set_length(struct papr_sysparm_buf *buf, size_t length) +{ + WARN_ONCE(length > sizeof(buf->val), + "bogus length %zu, clamping to safe value", length); + length = min(sizeof(buf->val), length); + buf->len = cpu_to_be16(length); +} + +/* + * For use on buffers returned from ibm,get-system-parameter before + * returning them to callers. Ensures the encoded length of valid data + * cannot overrun buf->val[]. + */ +static void papr_sysparm_buf_clamp_length(struct papr_sysparm_buf *buf) +{ + papr_sysparm_buf_set_length(buf, papr_sysparm_buf_get_length(buf)); +} + +/* + * Perform some basic diligence on the system parameter buffer before + * submitting it to RTAS. + */ +static bool papr_sysparm_buf_can_submit(const struct papr_sysparm_buf *buf) +{ + /* + * Firmware ought to reject buffer lengths that exceed the + * maximum specified in PAPR, but there's no reason for the + * kernel to allow them either. + */ + if (papr_sysparm_buf_get_length(buf) > sizeof(buf->val)) + return false; + + return true; +} + +/** + * papr_sysparm_get() - Retrieve the value of a PAPR system parameter. + * @param: PAPR system parameter token as described in + * 7.3.16 "System Parameters Option". + * @buf: A &struct papr_sysparm_buf as returned from papr_sysparm_buf_alloc(). 
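
For illustration only (not part of this patch): a minimal sketch of a hypothetical in-kernel caller of this interface. The function name and the parameter token value (20) are placeholders, not definitions from this patch or from PAPR.

static int example_read_sysparm(void)
{
	struct papr_sysparm_buf *buf;
	papr_sysparm_t param = { .token = 20 };	/* placeholder token */
	int ret;

	buf = papr_sysparm_buf_alloc();
	if (!buf)
		return -ENOMEM;

	/* buf->len is 0 here, so no input data is passed to firmware. */
	ret = papr_sysparm_get(param, buf);
	if (!ret)
		pr_info("parameter %u returned %u bytes\n",
			param.token, be16_to_cpu(buf->len));

	papr_sysparm_buf_free(buf);
	return ret;
}
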
+ * + * Place the result of querying the specified parameter, if available, + * in @buf. The result includes a be16 length header followed by the + * value, which may be a string or binary data. See &struct papr_sysparm_buf. + * + * Since there is at least one parameter (60, OS Service Entitlement + * Status) where the results depend on the incoming contents of the + * work area, the caller-supplied buffer is copied unmodified into the + * work area before calling ibm,get-system-parameter. + * + * A defined parameter may not be implemented on a given system, and + * some implemented parameters may not be available to all partitions + * on a system. A parameter's disposition may change at any time due + * to system configuration changes or partition migration. + * + * Context: This function may sleep. + * + * Return: 0 on success, -errno otherwise. @buf is unmodified on error. + */ +int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf) +{ + const s32 token = rtas_function_token(RTAS_FN_IBM_GET_SYSTEM_PARAMETER); + struct rtas_work_area *work_area; + s32 fwrc; + int ret; + + might_sleep(); + + if (WARN_ON(!buf)) + return -EFAULT; + + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + if (!papr_sysparm_buf_can_submit(buf)) + return -EINVAL; + + work_area = rtas_work_area_alloc(sizeof(*buf)); + + memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf)); + + do { + fwrc = rtas_call(token, 3, 1, NULL, param.token, + rtas_work_area_phys(work_area), + rtas_work_area_size(work_area)); + } while (rtas_busy_delay(fwrc)); + + switch (fwrc) { + case 0: + ret = 0; + memcpy(buf, rtas_work_area_raw_buf(work_area), sizeof(*buf)); + papr_sysparm_buf_clamp_length(buf); + break; + case -3: /* parameter not implemented */ + ret = -EOPNOTSUPP; + break; + case -9002: /* this partition not authorized to retrieve this parameter */ + ret = -EPERM; + break; + case -9999: /* "parameter error" e.g. the buffer is too small */ + ret = -EINVAL; + break; + default: + pr_err("unexpected ibm,get-system-parameter result %d\n", fwrc); + fallthrough; + case -1: /* Hardware/platform error */ + ret = -EIO; + break; + } + + rtas_work_area_free(work_area); + + return ret; +} + +int papr_sysparm_set(papr_sysparm_t param, const struct papr_sysparm_buf *buf) +{ + const s32 token = rtas_function_token(RTAS_FN_IBM_SET_SYSTEM_PARAMETER); + struct rtas_work_area *work_area; + s32 fwrc; + int ret; + + might_sleep(); + + if (WARN_ON(!buf)) + return -EFAULT; + + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + if (!papr_sysparm_buf_can_submit(buf)) + return -EINVAL; + + work_area = rtas_work_area_alloc(sizeof(*buf)); + + memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf)); + + do { + fwrc = rtas_call(token, 2, 1, NULL, param.token, + rtas_work_area_phys(work_area)); + } while (rtas_busy_delay(fwrc)); + + switch (fwrc) { + case 0: + ret = 0; + break; + case -3: /* parameter not supported */ + ret = -EOPNOTSUPP; + break; + case -9002: /* this partition not authorized to modify this parameter */ + ret = -EPERM; + break; + case -9999: /* "parameter error" e.g. 
invalid input data */ + ret = -EINVAL; + break; + default: + pr_err("unexpected ibm,set-system-parameter result %d\n", fwrc); + fallthrough; + case -1: /* Hardware/platform error */ + ret = -EIO; + break; + } + + rtas_work_area_free(work_area); + + return ret; +} + +static struct papr_sysparm_buf * +papr_sysparm_buf_from_user(const struct papr_sysparm_io_block __user *user_iob) +{ + struct papr_sysparm_buf *kern_spbuf; + long err; + u16 len; + + /* + * The length of valid data that userspace claims to be in + * user_iob->data[]. + */ + if (get_user(len, &user_iob->length)) + return ERR_PTR(-EFAULT); + + static_assert(sizeof(user_iob->data) >= PAPR_SYSPARM_MAX_INPUT); + static_assert(sizeof(kern_spbuf->val) >= PAPR_SYSPARM_MAX_INPUT); + + if (len > PAPR_SYSPARM_MAX_INPUT) + return ERR_PTR(-EINVAL); + + kern_spbuf = papr_sysparm_buf_alloc(); + if (!kern_spbuf) + return ERR_PTR(-ENOMEM); + + papr_sysparm_buf_set_length(kern_spbuf, len); + + if (len > 0 && copy_from_user(kern_spbuf->val, user_iob->data, len)) { + err = -EFAULT; + goto free_sysparm_buf; + } + + return kern_spbuf; + +free_sysparm_buf: + papr_sysparm_buf_free(kern_spbuf); + return ERR_PTR(err); +} + +static int papr_sysparm_buf_to_user(const struct papr_sysparm_buf *kern_spbuf, + struct papr_sysparm_io_block __user *user_iob) +{ + u16 len_out = papr_sysparm_buf_get_length(kern_spbuf); + + if (put_user(len_out, &user_iob->length)) + return -EFAULT; + + static_assert(sizeof(user_iob->data) >= PAPR_SYSPARM_MAX_OUTPUT); + static_assert(sizeof(kern_spbuf->val) >= PAPR_SYSPARM_MAX_OUTPUT); + + if (copy_to_user(user_iob->data, kern_spbuf->val, PAPR_SYSPARM_MAX_OUTPUT)) + return -EFAULT; + + return 0; +} + +static long papr_sysparm_ioctl_get(struct papr_sysparm_io_block __user *user_iob) +{ + struct papr_sysparm_buf *kern_spbuf; + papr_sysparm_t param; + long ret; + + if (get_user(param.token, &user_iob->parameter)) + return -EFAULT; + + kern_spbuf = papr_sysparm_buf_from_user(user_iob); + if (IS_ERR(kern_spbuf)) + return PTR_ERR(kern_spbuf); + + ret = papr_sysparm_get(param, kern_spbuf); + if (ret) + goto free_sysparm_buf; + + ret = papr_sysparm_buf_to_user(kern_spbuf, user_iob); + if (ret) + goto free_sysparm_buf; + + ret = 0; + +free_sysparm_buf: + papr_sysparm_buf_free(kern_spbuf); + return ret; +} + + +static long papr_sysparm_ioctl_set(struct papr_sysparm_io_block __user *user_iob) +{ + struct papr_sysparm_buf *kern_spbuf; + papr_sysparm_t param; + long ret; + + if (get_user(param.token, &user_iob->parameter)) + return -EFAULT; + + kern_spbuf = papr_sysparm_buf_from_user(user_iob); + if (IS_ERR(kern_spbuf)) + return PTR_ERR(kern_spbuf); + + ret = papr_sysparm_set(param, kern_spbuf); + if (ret) + goto free_sysparm_buf; + + ret = 0; + +free_sysparm_buf: + papr_sysparm_buf_free(kern_spbuf); + return ret; +} + +static long papr_sysparm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (__force void __user *)arg; + long ret; + + switch (ioctl) { + case PAPR_SYSPARM_IOC_GET: + ret = papr_sysparm_ioctl_get(argp); + break; + case PAPR_SYSPARM_IOC_SET: + if (filp->f_mode & FMODE_WRITE) + ret = papr_sysparm_ioctl_set(argp); + else + ret = -EBADF; + break; + default: + ret = -ENOIOCTLCMD; + break; + } + return ret; +} + +static const struct file_operations papr_sysparm_ops = { + .unlocked_ioctl = papr_sysparm_ioctl, +}; + +static struct miscdevice papr_sysparm_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "papr-sysparm", + .fops = &papr_sysparm_ops, +}; + +static __init int papr_sysparm_init(void) +{ 
+ if (!rtas_function_implemented(RTAS_FN_IBM_GET_SYSTEM_PARAMETER)) + return -ENODEV; + + return misc_register(&papr_sysparm_dev); +} +machine_device_initcall(pseries, papr_sysparm_init); diff --git a/arch/powerpc/platforms/pseries/papr-vpd.c b/arch/powerpc/platforms/pseries/papr-vpd.c new file mode 100644 index 000000000000..f38c188fc4a1 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-vpd.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-vpd: " fmt + +#include <linux/build_bug.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/lockdep.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/string_helpers.h> +#include <linux/uaccess.h> +#include <asm/machdep.h> +#include <asm/papr-vpd.h> +#include <asm/rtas-work-area.h> +#include <asm/rtas.h> +#include <uapi/asm/papr-vpd.h> +#include "papr-rtas-common.h" + +/** + * struct rtas_ibm_get_vpd_params - Parameters (in and out) for ibm,get-vpd. + * @loc_code: In: Caller-provided location code buffer. Must be RTAS-addressable. + * @work_area: In: Caller-provided work area buffer for results. + * @sequence: In: Sequence number. Out: Next sequence number. + * @written: Out: Bytes written by ibm,get-vpd to @work_area. + * @status: Out: RTAS call status. + */ +struct rtas_ibm_get_vpd_params { + const struct papr_location_code *loc_code; + struct rtas_work_area *work_area; + u32 sequence; + u32 written; + s32 status; +}; + +/** + * rtas_ibm_get_vpd() - Call ibm,get-vpd to fill a work area buffer. + * @params: See &struct rtas_ibm_get_vpd_params. + * + * Calls ibm,get-vpd until it errors or successfully deposits data + * into the supplied work area. Handles RTAS retry statuses. Maps RTAS + * error statuses to reasonable errno values. + * + * The caller is expected to invoke rtas_ibm_get_vpd() multiple times + * to retrieve all the VPD for the provided location code. Only one + * sequence should be in progress at any time; starting a new sequence + * will disrupt any sequence already in progress. Serialization of VPD + * retrieval sequences is the responsibility of the caller. + * + * The caller should inspect @params.status to determine whether more + * calls are needed to complete the sequence. + * + * Context: May sleep. + * Return: -ve on error, 0 otherwise. + */ +static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params) +{ + const struct papr_location_code *loc_code = params->loc_code; + struct rtas_work_area *work_area = params->work_area; + u32 rets[2]; + s32 fwrc; + int ret; + + lockdep_assert_held(&rtas_ibm_get_vpd_lock); + + do { + fwrc = rtas_call(rtas_function_token(RTAS_FN_IBM_GET_VPD), 4, 3, + rets, + __pa(loc_code), + rtas_work_area_phys(work_area), + rtas_work_area_size(work_area), + params->sequence); + } while (rtas_busy_delay(fwrc)); + + switch (fwrc) { + case RTAS_HARDWARE_ERROR: + ret = -EIO; + break; + case RTAS_INVALID_PARAMETER: + ret = -EINVAL; + break; + case RTAS_SEQ_START_OVER: + ret = -EAGAIN; + pr_info_ratelimited("VPD changed during retrieval, retrying\n"); + break; + case RTAS_SEQ_MORE_DATA: + params->sequence = rets[0]; + fallthrough; + case RTAS_SEQ_COMPLETE: + params->written = rets[1]; + /* + * Kernel or firmware bug, do not continue. 
+ */ + if (WARN(params->written > rtas_work_area_size(work_area), + "possible write beyond end of work area")) + ret = -EFAULT; + else + ret = 0; + break; + default: + ret = -EIO; + pr_err_ratelimited("unexpected ibm,get-vpd status %d\n", fwrc); + break; + } + + params->status = fwrc; + return ret; +} + +/* + * Internal VPD sequence APIs. A VPD sequence is a series of calls to + * ibm,get-vpd for a given location code. The sequence ends when an + * error is encountered or all VPD for the location code has been + * returned. + */ + +/** + * vpd_sequence_begin() - Begin a VPD retrieval sequence. + * @seq: vpd call parameters from sequence struct + * + * Context: May sleep. + */ +static void vpd_sequence_begin(struct papr_rtas_sequence *seq) +{ + struct rtas_ibm_get_vpd_params *vpd_params; + /* + * Use a static data structure for the location code passed to + * RTAS to ensure it's in the RMA and avoid a separate work + * area allocation. Guarded by the function lock. + */ + static struct papr_location_code static_loc_code; + + vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params; + /* + * We could allocate the work area before acquiring the + * function lock, but that would allow concurrent requests to + * exhaust the limited work area pool for no benefit. So + * allocate the work area under the lock. + */ + mutex_lock(&rtas_ibm_get_vpd_lock); + static_loc_code = *(struct papr_location_code *)vpd_params->loc_code; + vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params; + vpd_params->work_area = rtas_work_area_alloc(SZ_4K); + vpd_params->loc_code = &static_loc_code; + vpd_params->sequence = 1; + vpd_params->status = 0; +} + +/** + * vpd_sequence_end() - Finalize a VPD retrieval sequence. + * @seq: Sequence state. + * + * Releases resources obtained by vpd_sequence_begin(). + */ +static void vpd_sequence_end(struct papr_rtas_sequence *seq) +{ + struct rtas_ibm_get_vpd_params *vpd_params; + + vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params; + rtas_work_area_free(vpd_params->work_area); + mutex_unlock(&rtas_ibm_get_vpd_lock); +} + +/* + * Generator function to be passed to papr_rtas_blob_generate(). + */ +static const char *vpd_sequence_fill_work_area(struct papr_rtas_sequence *seq, + size_t *len) +{ + struct rtas_ibm_get_vpd_params *p; + bool init_state; + + p = (struct rtas_ibm_get_vpd_params *)seq->params; + init_state = (p->written == 0) ? true : false; + + if (papr_rtas_sequence_should_stop(seq, p->status, init_state)) + return NULL; + if (papr_rtas_sequence_set_err(seq, rtas_ibm_get_vpd(p))) + return NULL; + *len = p->written; + return rtas_work_area_raw_buf(p->work_area); +} + +static const struct file_operations papr_vpd_handle_ops = { + .read = papr_rtas_common_handle_read, + .llseek = papr_rtas_common_handle_seek, + .release = papr_rtas_common_handle_release, +}; + +/** + * papr_vpd_create_handle() - Create a fd-based handle for reading VPD. + * @ulc: Location code in user memory; defines the scope of the VPD to + * retrieve. + * + * Handler for PAPR_VPD_IOC_CREATE_HANDLE ioctl command. Validates + * @ulc and instantiates an immutable VPD "blob" for it. The blob is + * attached to a file descriptor for reading by user space. The memory + * backing the blob is freed when the file is released. + * + * The entire requested VPD is retrieved by this call and all + * necessary RTAS interactions are performed before returning the fd + * to user space. 
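
For illustration only (not part of this patch): a minimal user-space sketch of the handle creation and read flow described above, assuming PAPR_VPD_IOC_CREATE_HANDLE and struct papr_location_code are exported by <asm/papr-vpd.h>; error handling is largely elided.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/papr-vpd.h>

int dump_vpd(const char *loc_code, FILE *out)
{
	struct papr_location_code lc = { .str = "" };
	char buf[4096];
	int devfd, fd;
	ssize_t n;

	strncpy(lc.str, loc_code, sizeof(lc.str) - 1);

	devfd = open("/dev/papr-vpd", O_RDONLY);
	if (devfd < 0)
		return -1;

	/* All ibm,get-vpd sequencing happens inside this ioctl. */
	fd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc);
	close(devfd);
	if (fd < 0)
		return -1;

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, out);

	close(fd);
	return n < 0 ? -1 : 0;
}
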
This keeps the read handler simple and ensures that + * the kernel can prevent interleaving of ibm,get-vpd call sequences. + * + * Return: The installed fd number if successful, -ve errno otherwise. + */ +static long papr_vpd_create_handle(struct papr_location_code __user *ulc) +{ + struct rtas_ibm_get_vpd_params vpd_params = {}; + struct papr_rtas_sequence seq = {}; + struct papr_location_code klc; + int fd; + + if (copy_from_user(&klc, ulc, sizeof(klc))) + return -EFAULT; + + if (!string_is_terminated(klc.str, ARRAY_SIZE(klc.str))) + return -EINVAL; + + seq = (struct papr_rtas_sequence) { + .begin = vpd_sequence_begin, + .end = vpd_sequence_end, + .work = vpd_sequence_fill_work_area, + }; + + vpd_params.loc_code = &klc; + seq.params = (void *)&vpd_params; + + fd = papr_rtas_setup_file_interface(&seq, &papr_vpd_handle_ops, + "[papr-vpd]"); + + return fd; +} + +/* + * Top-level ioctl handler for /dev/papr-vpd. + */ +static long papr_vpd_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (__force void __user *)arg; + long ret; + + switch (ioctl) { + case PAPR_VPD_IOC_CREATE_HANDLE: + ret = papr_vpd_create_handle(argp); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + return ret; +} + +static const struct file_operations papr_vpd_ops = { + .unlocked_ioctl = papr_vpd_dev_ioctl, +}; + +static struct miscdevice papr_vpd_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "papr-vpd", + .fops = &papr_vpd_ops, +}; + +static __init int papr_vpd_init(void) +{ + if (!rtas_function_implemented(RTAS_FN_IBM_GET_VPD)) + return -ENODEV; + + return misc_register(&papr_vpd_dev); +} +machine_device_initcall(pseries, papr_vpd_init); diff --git a/arch/powerpc/platforms/pseries/papr_platform_attributes.c b/arch/powerpc/platforms/pseries/papr_platform_attributes.c index 515150417bb3..eea2041b270b 100644 --- a/arch/powerpc/platforms/pseries/papr_platform_attributes.c +++ b/arch/powerpc/platforms/pseries/papr_platform_attributes.c @@ -22,6 +22,7 @@ #include <asm/hvcall.h> #include <asm/machdep.h> +#include <asm/firmware.h> #include "pseries.h" @@ -100,10 +101,12 @@ retry: esi_buf_size = ESI_HDR_SIZE + (CURR_MAX_ESI_ATTRS * max_esi_attrs); temp_buf = krealloc(buf, esi_buf_size, GFP_KERNEL); - if (temp_buf) + if (temp_buf) { buf = temp_buf; - else - return -ENOMEM; + } else { + ret = -ENOMEM; + goto out_buf; + } goto retry; } diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 82cae08976bc..f7c9271bda58 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/ioport.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <linux/ndctl.h> #include <linux/sched.h> @@ -16,9 +17,10 @@ #include <linux/nd.h> #include <asm/plpar_wrappers.h> -#include <asm/papr_pdsm.h> +#include <uapi/linux/papr_pdsm.h> +#include <linux/papr_scm.h> #include <asm/mce.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/perf_event.h> #define BIND_ANY_ADDR (~0ul) @@ -29,46 +31,6 @@ (1ul << ND_CMD_SET_CONFIG_DATA) | \ (1ul << ND_CMD_CALL)) -/* DIMM health bitmap bitmap indicators */ -/* SCM device is unable to persist memory contents */ -#define PAPR_PMEM_UNARMED (1ULL << (63 - 0)) -/* SCM device failed to persist memory contents */ -#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1)) -/* SCM device contents are persisted from previous IPL */ -#define PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << 
(63 - 2)) -/* SCM device contents are not persisted from previous IPL */ -#define PAPR_PMEM_EMPTY (1ULL << (63 - 3)) -/* SCM device memory life remaining is critically low */ -#define PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4)) -/* SCM device will be garded off next IPL due to failure */ -#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5)) -/* SCM contents cannot persist due to current platform health status */ -#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6)) -/* SCM device is unable to persist memory contents in certain conditions */ -#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7)) -/* SCM device is encrypted */ -#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8)) -/* SCM device has been scrubbed and locked */ -#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9)) - -/* Bits status indicators for health bitmap indicating unarmed dimm */ -#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \ - PAPR_PMEM_HEALTH_UNHEALTHY) - -/* Bits status indicators for health bitmap indicating unflushed dimm */ -#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY) - -/* Bits status indicators for health bitmap indicating unrestored dimm */ -#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY) - -/* Bit status indicators for smart event notification */ -#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \ - PAPR_PMEM_HEALTH_FATAL | \ - PAPR_PMEM_HEALTH_UNHEALTHY) - -#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS) -#define PAPR_SCM_PERF_STATS_VERSION 0x1 - /* Struct holding a single performance metric */ struct papr_scm_perf_stat { u8 stat_id[8]; @@ -124,9 +86,6 @@ struct papr_scm_priv { /* The bits which needs to be overridden */ u64 health_bitmap_inject_mask; - - /* array to have event_code and stat_id mappings */ - u8 *nvdimm_events_map; }; static int papr_scm_pmem_flush(struct nd_region *nd_region, @@ -350,18 +309,41 @@ static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p, #ifdef CONFIG_PERF_EVENTS #define to_nvdimm_pmu(_pmu) container_of(_pmu, struct nvdimm_pmu, pmu) +static const char * const nvdimm_events_map[] = { + [1] = "CtlResCt", + [2] = "CtlResTm", + [3] = "PonSecs ", + [4] = "MemLife ", + [5] = "CritRscU", + [6] = "HostLCnt", + [7] = "HostSCnt", + [8] = "HostSDur", + [9] = "HostLDur", + [10] = "MedRCnt ", + [11] = "MedWCnt ", + [12] = "MedRDur ", + [13] = "MedWDur ", + [14] = "CchRHCnt", + [15] = "CchWHCnt", + [16] = "FastWCnt", +}; + static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, u64 *count) { struct papr_scm_perf_stat *stat; struct papr_scm_perf_stats *stats; - struct papr_scm_priv *p = (struct papr_scm_priv *)dev->driver_data; + struct papr_scm_priv *p = dev_get_drvdata(dev); int rc, size; + /* Invalid eventcode */ + if (event->attr.config == 0 || event->attr.config >= ARRAY_SIZE(nvdimm_events_map)) + return -EINVAL; + /* Allocate request buffer enough to hold single performance stat */ size = sizeof(struct papr_scm_perf_stats) + sizeof(struct papr_scm_perf_stat); - if (!p || !p->nvdimm_events_map) + if (!p) return -EINVAL; stats = kzalloc(size, GFP_KERNEL); @@ -370,7 +352,7 @@ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, stat = &stats->scm_statistic[0]; memcpy(&stat->stat_id, - &p->nvdimm_events_map[event->attr.config * sizeof(stat->stat_id)], + nvdimm_events_map[event->attr.config], sizeof(stat->stat_id)); stat->stat_val = 0; @@ -458,56 +440,6 @@ static void papr_scm_pmu_del(struct perf_event *event, int flags) papr_scm_pmu_read(event); } -static int 
papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu *nd_pmu) -{ - struct papr_scm_perf_stat *stat; - struct papr_scm_perf_stats *stats; - u32 available_events; - int index, rc = 0; - - if (!p->stat_buffer_len) - return -ENOENT; - - available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats)) - / sizeof(struct papr_scm_perf_stat); - if (available_events == 0) - return -EOPNOTSUPP; - - /* Allocate the buffer for phyp where stats are written */ - stats = kzalloc(p->stat_buffer_len, GFP_KERNEL); - if (!stats) { - rc = -ENOMEM; - return rc; - } - - /* Called to get list of events supported */ - rc = drc_pmem_query_stats(p, stats, 0); - if (rc) - goto out; - - /* - * Allocate memory and populate nvdimm_event_map. - * Allocate an extra element for NULL entry - */ - p->nvdimm_events_map = kcalloc(available_events + 1, - sizeof(stat->stat_id), - GFP_KERNEL); - if (!p->nvdimm_events_map) { - rc = -ENOMEM; - goto out; - } - - /* Copy all stat_ids to event map */ - for (index = 0, stat = stats->scm_statistic; - index < available_events; index++, ++stat) { - memcpy(&p->nvdimm_events_map[index * sizeof(stat->stat_id)], - &stat->stat_id, sizeof(stat->stat_id)); - } -out: - kfree(stats); - return rc; -} - static void papr_scm_pmu_register(struct papr_scm_priv *p) { struct nvdimm_pmu *nd_pmu; @@ -519,9 +451,10 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p) goto pmu_err_print; } - rc = papr_scm_pmu_check_events(p, nd_pmu); - if (rc) + if (!p->stat_buffer_len) { + rc = -ENOENT; goto pmu_check_events_err; + } nd_pmu->pmu.task_ctx_nr = perf_invalid_context; nd_pmu->pmu.name = nvdimm_name(p->nvdimm); @@ -539,7 +472,7 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p) rc = register_nvdimm_pmu(nd_pmu, p->pdev); if (rc) - goto pmu_register_err; + goto pmu_check_events_err; /* * Set archdata.priv value to nvdimm_pmu structure, to handle the @@ -548,8 +481,6 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p) p->pdev->archdata.priv = nd_pmu; return; -pmu_register_err: - kfree(p->nvdimm_events_map); pmu_check_events_err: kfree(nd_pmu); pmu_err_print: @@ -613,7 +544,7 @@ static int drc_pmem_query_health(struct papr_scm_priv *p) /* Jiffies offset for which the health data is assumed to be same */ cache_timeout = p->lasthealth_jiffies + - msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000); + secs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL); /* Fetch new health info is its older than MIN_HEALTH_QUERY_INTERVAL */ if (time_after(jiffies, cache_timeout)) @@ -1459,6 +1390,13 @@ static int papr_scm_probe(struct platform_device *pdev) return -ENODEV; } + /* + * open firmware platform device create won't update the NUMA + * distance table. For PAPR SCM devices we use numa_map_to_online_node() + * to find the nearest online NUMA node and that requires correct + * distance table information. 
+ */ + update_numa_distance(dn); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) @@ -1545,7 +1483,7 @@ err: kfree(p); return rc; } -static int papr_scm_remove(struct platform_device *pdev) +static void papr_scm_remove(struct platform_device *pdev) { struct papr_scm_priv *p = platform_get_drvdata(pdev); @@ -1560,11 +1498,8 @@ static int papr_scm_remove(struct platform_device *pdev) unregister_nvdimm_pmu(pdev->archdata.priv); pdev->archdata.priv = NULL; - kfree(p->nvdimm_events_map); kfree(p->bus_desc.provider_name); kfree(p); - - return 0; } static const struct of_device_id papr_scm_match[] = { @@ -1602,5 +1537,6 @@ static void __exit papr_scm_exit(void) module_exit(papr_scm_exit); MODULE_DEVICE_TABLE(of, papr_scm_match); +MODULE_DESCRIPTION("PAPR Storage Class Memory interface driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("IBM Corporation"); diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 6e671c3809ec..6dbc73eb2ca2 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -18,33 +18,6 @@ #include <asm/pci.h> #include "pseries.h" -#if 0 -void pcibios_name_device(struct pci_dev *dev) -{ - struct device_node *dn; - - /* - * Add IBM loc code (slot) as a prefix to the device names for service - */ - dn = pci_device_to_OF_node(dev); - if (dn) { - const char *loc_code = of_get_property(dn, "ibm,loc-code", - NULL); - if (loc_code) { - int loc_len = strlen(loc_code); - if (loc_len < sizeof(dev->dev.name)) { - memmove(dev->dev.name+loc_len+1, dev->dev.name, - sizeof(dev->dev.name)-loc_len-1); - memcpy(dev->dev.name, loc_code, loc_len); - dev->dev.name[loc_len] = ' '; - dev->dev.name[sizeof(dev->dev.name)-1] = '\0'; - } - } - } -} -DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device); -#endif - #ifdef CONFIG_PCI_IOV #define MAX_VFS_FOR_MAP_PE 256 struct pe_map_bar_entry { @@ -60,7 +33,7 @@ static int pseries_send_map_pe(struct pci_dev *pdev, u16 num_vfs, struct pci_dn *pdn; int rc; unsigned long buid, addr; - int ibm_map_pes = rtas_token("ibm,open-sriov-map-pe-number"); + int ibm_map_pes = rtas_function_token(RTAS_FN_IBM_OPEN_SRIOV_MAP_PE_NUMBER); if (ibm_map_pes == RTAS_UNKNOWN_SERVICE) return -EINVAL; @@ -240,7 +213,7 @@ void __init pSeries_final_fixup(void) */ static void fixup_winbond_82c105(struct pci_dev* dev) { - int i; + struct resource *r; unsigned int reg; if (!machine_is(pseries)) @@ -251,14 +224,14 @@ static void fixup_winbond_82c105(struct pci_dev* dev) /* Enable LEGIRQ to use INTC instead of ISA interrupts */ pci_write_config_dword(dev, 0x40, reg | (1<<11)); - for (i = 0; i < DEVICE_COUNT_RESOURCE; ++i) { + pci_dev_for_each_resource(dev, r) { /* zap the 2nd function of the winbond chip */ - if (dev->resource[i].flags & IORESOURCE_IO - && dev->bus->number == 0 && dev->devfn == 0x81) - dev->resource[i].flags &= ~IORESOURCE_IO; - if (dev->resource[i].start == 0 && dev->resource[i].end) { - dev->resource[i].flags = 0; - dev->resource[i].end = 0; + if (dev->bus->number == 0 && dev->devfn == 0x81 && + r->flags & IORESOURCE_IO) + r->flags &= ~IORESOURCE_IO; + if (r->start == 0 && r->end) { + r->flags = 0; + r->end = 0; } } } diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 4ba824568119..52e2623a741d 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -11,6 +11,7 @@ #include <linux/pci.h> #include <linux/export.h> +#include <linux/node.h> #include <asm/pci-bridge.h> #include 
<asm/ppc-pci.h> #include <asm/firmware.h> @@ -21,9 +22,22 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn) { struct pci_controller *phb; + int nid; pr_debug("PCI: Initializing new hotplug PHB %pOF\n", dn); + nid = of_node_to_nid(dn); + if (likely((nid) >= 0)) { + if (!node_online(nid)) { + if (__register_one_node(nid)) { + pr_err("PCI: Failed to register node %d\n", nid); + } else { + update_numa_distance(dn); + node_set_online(nid); + } + } + } + phb = pcibios_alloc_controller(dn); if (!phb) return NULL; @@ -35,6 +49,8 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn) pseries_msi_allocate_domains(phb); + ppc_iommu_register_device(phb); + /* Create EEH devices for the PHB */ eeh_phb_pe_create(phb); @@ -76,6 +92,8 @@ int remove_phb_dynamic(struct pci_controller *phb) } } + ppc_iommu_unregister_device(phb); + pseries_msi_free_domains(phb); /* Keep a reference so phb isn't freed yet */ diff --git a/arch/powerpc/platforms/pseries/plpks-secvar.c b/arch/powerpc/platforms/pseries/plpks-secvar.c new file mode 100644 index 000000000000..257fd1f8bc19 --- /dev/null +++ b/arch/powerpc/platforms/pseries/plpks-secvar.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0-only + +// Secure variable implementation using the PowerVM LPAR Platform KeyStore (PLPKS) +// +// Copyright 2022, 2023 IBM Corporation +// Authors: Russell Currey +// Andrew Donnellan +// Nayna Jain + +#define pr_fmt(fmt) "secvar: "fmt + +#include <linux/printk.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/kobject.h> +#include <linux/nls.h> +#include <asm/machdep.h> +#include <asm/secvar.h> +#include <asm/plpks.h> + +// Config attributes for sysfs +#define PLPKS_CONFIG_ATTR(name, fmt, func) \ + static ssize_t name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + char *buf) \ + { \ + return sysfs_emit(buf, fmt, func()); \ + } \ + static struct kobj_attribute attr_##name = __ATTR_RO(name) + +PLPKS_CONFIG_ATTR(version, "%u\n", plpks_get_version); +PLPKS_CONFIG_ATTR(max_object_size, "%u\n", plpks_get_maxobjectsize); +PLPKS_CONFIG_ATTR(total_size, "%u\n", plpks_get_totalsize); +PLPKS_CONFIG_ATTR(used_space, "%u\n", plpks_get_usedspace); +PLPKS_CONFIG_ATTR(supported_policies, "%08x\n", plpks_get_supportedpolicies); +PLPKS_CONFIG_ATTR(signed_update_algorithms, "%016llx\n", plpks_get_signedupdatealgorithms); + +static const struct attribute *config_attrs[] = { + &attr_version.attr, + &attr_max_object_size.attr, + &attr_total_size.attr, + &attr_used_space.attr, + &attr_supported_policies.attr, + &attr_signed_update_algorithms.attr, + NULL, +}; + +static u32 get_policy(const char *name) +{ + if ((strcmp(name, "db") == 0) || + (strcmp(name, "dbx") == 0) || + (strcmp(name, "grubdb") == 0) || + (strcmp(name, "grubdbx") == 0) || + (strcmp(name, "sbat") == 0)) + return (PLPKS_WORLDREADABLE | PLPKS_SIGNEDUPDATE); + else + return PLPKS_SIGNEDUPDATE; +} + +static const char * const plpks_var_names[] = { + "PK", + "KEK", + "db", + "dbx", + "grubdb", + "grubdbx", + "sbat", + "moduledb", + "trustedcadb", + NULL, +}; + +static int plpks_get_variable(const char *key, u64 key_len, u8 *data, + u64 *data_size) +{ + struct plpks_var var = {0}; + int rc = 0; + + // We subtract 1 from key_len because we don't need to include the + // null terminator at the end of the string + var.name = kcalloc(key_len - 1, sizeof(wchar_t), GFP_KERNEL); + if (!var.name) + return -ENOMEM; + rc = utf8s_to_utf16s(key, key_len - 1, UTF16_LITTLE_ENDIAN, 
(wchar_t *)var.name, + key_len - 1); + if (rc < 0) + goto err; + var.namelen = rc * 2; + + var.os = PLPKS_VAR_LINUX; + if (data) { + var.data = data; + var.datalen = *data_size; + } + rc = plpks_read_os_var(&var); + + if (rc) + goto err; + + *data_size = var.datalen; + +err: + kfree(var.name); + if (rc && rc != -ENOENT) { + pr_err("Failed to read variable '%s': %d\n", key, rc); + // Return -EIO since userspace probably doesn't care about the + // specific error + rc = -EIO; + } + return rc; +} + +static int plpks_set_variable(const char *key, u64 key_len, u8 *data, + u64 data_size) +{ + struct plpks_var var = {0}; + int rc = 0; + u64 flags; + + // Secure variables need to be prefixed with 8 bytes of flags. + // We only want to perform the write if we have at least one byte of data. + if (data_size <= sizeof(flags)) + return -EINVAL; + + // We subtract 1 from key_len because we don't need to include the + // null terminator at the end of the string + var.name = kcalloc(key_len - 1, sizeof(wchar_t), GFP_KERNEL); + if (!var.name) + return -ENOMEM; + rc = utf8s_to_utf16s(key, key_len - 1, UTF16_LITTLE_ENDIAN, (wchar_t *)var.name, + key_len - 1); + if (rc < 0) + goto err; + var.namelen = rc * 2; + + // Flags are contained in the first 8 bytes of the buffer, and are always big-endian + flags = be64_to_cpup((__be64 *)data); + + var.datalen = data_size - sizeof(flags); + var.data = data + sizeof(flags); + var.os = PLPKS_VAR_LINUX; + var.policy = get_policy(key); + + // Unlike in the read case, the plpks error code can be useful to + // userspace on write, so we return it rather than just -EIO + rc = plpks_signed_update_var(&var, flags); + +err: + kfree(var.name); + return rc; +} + +// PLPKS dynamic secure boot doesn't give us a format string in the same way OPAL does. +// Instead, report the format using the SB_VERSION variable in the keystore. +// The string is made up by us, and takes the form "ibm,plpks-sb-v<n>" (or "ibm,plpks-sb-unknown" +// if the SB_VERSION variable doesn't exist). Hypervisor defines the SB_VERSION variable as a +// "1 byte unsigned integer value". +static ssize_t plpks_secvar_format(char *buf, size_t bufsize) +{ + struct plpks_var var = {0}; + ssize_t ret; + u8 version; + + var.component = NULL; + // Only the signed variables have null bytes in their names, this one doesn't + var.name = "SB_VERSION"; + var.namelen = strlen(var.name); + var.datalen = 1; + var.data = &version; + + // Unlike the other vars, SB_VERSION is owned by firmware instead of the OS + ret = plpks_read_fw_var(&var); + if (ret) { + if (ret == -ENOENT) { + ret = snprintf(buf, bufsize, "ibm,plpks-sb-unknown"); + } else { + pr_err("Error %ld reading SB_VERSION from firmware\n", ret); + ret = -EIO; + } + goto err; + } + + ret = snprintf(buf, bufsize, "ibm,plpks-sb-v%hhu", version); +err: + return ret; +} + +static int plpks_max_size(u64 *max_size) +{ + // The max object size reported by the hypervisor is accurate for the + // object itself, but we use the first 8 bytes of data on write as the + // signed update flags, so the max size a user can write is larger. 
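+ // For example, assuming a hypothetical plpks_get_maxobjectsize() of
+ // 8184 bytes, the limit reported here would be 8184 + 8 = 8192 bytes:
+ // an 8-byte signed-update flags prefix followed by up to 8184 bytes
+ // of object data.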
+ *max_size = (u64)plpks_get_maxobjectsize() + sizeof(u64); + + return 0; +} + + +static const struct secvar_operations plpks_secvar_ops = { + .get = plpks_get_variable, + .set = plpks_set_variable, + .format = plpks_secvar_format, + .max_size = plpks_max_size, + .config_attrs = config_attrs, + .var_names = plpks_var_names, +}; + +static int plpks_secvar_init(void) +{ + if (!plpks_is_available()) + return -ENODEV; + + return set_secvar_ops(&plpks_secvar_ops); +} +machine_device_initcall(pseries, plpks_secvar_init); diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c new file mode 100644 index 000000000000..b1667ed05f98 --- /dev/null +++ b/arch/powerpc/platforms/pseries/plpks.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * POWER LPAR Platform KeyStore(PLPKS) + * Copyright (C) 2022 IBM Corporation + * Author: Nayna Jain <nayna@linux.ibm.com> + * + * Provides access to variables stored in Power LPAR Platform KeyStore(PLPKS). + */ + +#define pr_fmt(fmt) "plpks: " fmt + +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/of_fdt.h> +#include <linux/libfdt.h> +#include <linux/memblock.h> +#include <asm/hvcall.h> +#include <asm/machdep.h> +#include <asm/plpks.h> +#include <asm/firmware.h> + +static u8 *ospassword; +static u16 ospasswordlength; + +// Retrieved with H_PKS_GET_CONFIG +static u8 version; +static u16 objoverhead; +static u16 maxpwsize; +static u16 maxobjsize; +static s16 maxobjlabelsize; +static u32 totalsize; +static u32 usedspace; +static u32 supportedpolicies; +static u32 maxlargeobjectsize; +static u64 signedupdatealgorithms; + +struct plpks_auth { + u8 version; + u8 consumer; + __be64 rsvd0; + __be32 rsvd1; + __be16 passwordlength; + u8 password[]; +} __packed __aligned(16); + +struct label_attr { + u8 prefix[8]; + u8 version; + u8 os; + u8 length; + u8 reserved[5]; +}; + +struct label { + struct label_attr attr; + u8 name[PLPKS_MAX_NAME_SIZE]; + size_t size; +}; + +static int pseries_status_to_err(int rc) +{ + int err; + + switch (rc) { + case H_SUCCESS: + err = 0; + break; + case H_FUNCTION: + err = -ENXIO; + break; + case H_PARAMETER: + case H_P2: + case H_P3: + case H_P4: + case H_P5: + case H_P6: + err = -EINVAL; + break; + case H_NOT_FOUND: + err = -ENOENT; + break; + case H_BUSY: + case H_LONG_BUSY_ORDER_1_MSEC: + case H_LONG_BUSY_ORDER_10_MSEC: + case H_LONG_BUSY_ORDER_100_MSEC: + case H_LONG_BUSY_ORDER_1_SEC: + case H_LONG_BUSY_ORDER_10_SEC: + case H_LONG_BUSY_ORDER_100_SEC: + err = -EBUSY; + break; + case H_AUTHORITY: + err = -EPERM; + break; + case H_NO_MEM: + err = -ENOMEM; + break; + case H_RESOURCE: + err = -EEXIST; + break; + case H_TOO_BIG: + err = -EFBIG; + break; + case H_STATE: + err = -EIO; + break; + case H_R_STATE: + err = -EIO; + break; + case H_IN_USE: + err = -EEXIST; + break; + case H_ABORTED: + err = -EIO; + break; + default: + err = -EINVAL; + } + + pr_debug("Converted hypervisor code %d to Linux %d\n", rc, err); + + return err; +} + +static int plpks_gen_password(void) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 }; + u8 *password, consumer = PLPKS_OS_OWNER; + int rc; + + // If we booted from kexec, we could be reusing an existing password already + if (ospassword) { + pr_debug("Password of length %u already in use\n", ospasswordlength); + return 0; + } + + // The password must not cross a page boundary, so we align to the next power of 2 + 
password = kzalloc(roundup_pow_of_two(maxpwsize), GFP_KERNEL); + if (!password) + return -ENOMEM; + + rc = plpar_hcall(H_PKS_GEN_PASSWORD, retbuf, consumer, 0, + virt_to_phys(password), maxpwsize); + + if (!rc) { + ospasswordlength = maxpwsize; + ospassword = kzalloc(maxpwsize, GFP_KERNEL); + if (!ospassword) { + kfree_sensitive(password); + return -ENOMEM; + } + memcpy(ospassword, password, ospasswordlength); + } else { + if (rc == H_IN_USE) { + pr_warn("Password already set - authenticated operations will fail\n"); + rc = 0; + } else { + goto out; + } + } +out: + kfree_sensitive(password); + + return pseries_status_to_err(rc); +} + +static struct plpks_auth *construct_auth(u8 consumer) +{ + struct plpks_auth *auth; + + if (consumer > PLPKS_OS_OWNER) + return ERR_PTR(-EINVAL); + + // The auth structure must not cross a page boundary and must be + // 16 byte aligned. We align to the next largest power of 2 + auth = kzalloc(roundup_pow_of_two(struct_size(auth, password, maxpwsize)), GFP_KERNEL); + if (!auth) + return ERR_PTR(-ENOMEM); + + auth->version = 1; + auth->consumer = consumer; + + if (consumer == PLPKS_FW_OWNER || consumer == PLPKS_BOOTLOADER_OWNER) + return auth; + + memcpy(auth->password, ospassword, ospasswordlength); + + auth->passwordlength = cpu_to_be16(ospasswordlength); + + return auth; +} + +/* + * Label is combination of label attributes + name. + * Label attributes are used internally by kernel and not exposed to the user. + */ +static struct label *construct_label(char *component, u8 varos, u8 *name, + u16 namelen) +{ + struct label *label; + size_t slen = 0; + + if (!name || namelen > PLPKS_MAX_NAME_SIZE) + return ERR_PTR(-EINVAL); + + // Support NULL component for signed updates + if (component) { + slen = strlen(component); + if (slen > sizeof(label->attr.prefix)) + return ERR_PTR(-EINVAL); + } + + // The label structure must not cross a page boundary, so we align to the next power of 2 + label = kzalloc(roundup_pow_of_two(sizeof(*label)), GFP_KERNEL); + if (!label) + return ERR_PTR(-ENOMEM); + + if (component) + memcpy(&label->attr.prefix, component, slen); + + label->attr.version = PLPKS_LABEL_VERSION; + label->attr.os = varos; + label->attr.length = PLPKS_MAX_LABEL_ATTR_SIZE; + memcpy(&label->name, name, namelen); + + label->size = sizeof(struct label_attr) + namelen; + + return label; +} + +static int _plpks_get_config(void) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 }; + struct config { + u8 version; + u8 flags; + __be16 rsvd0; + __be16 objoverhead; + __be16 maxpwsize; + __be16 maxobjlabelsize; + __be16 maxobjsize; + __be32 totalsize; + __be32 usedspace; + __be32 supportedpolicies; + __be32 maxlargeobjectsize; + __be64 signedupdatealgorithms; + u8 rsvd1[476]; + } __packed * config; + size_t size; + int rc = 0; + + size = sizeof(*config); + + // Config struct must not cross a page boundary. 
So long as the struct + // size is a power of 2, this should be fine as alignment is guaranteed + config = kzalloc(size, GFP_KERNEL); + if (!config) { + rc = -ENOMEM; + goto err; + } + + rc = plpar_hcall(H_PKS_GET_CONFIG, retbuf, virt_to_phys(config), size); + + if (rc != H_SUCCESS) { + rc = pseries_status_to_err(rc); + goto err; + } + + version = config->version; + objoverhead = be16_to_cpu(config->objoverhead); + maxpwsize = be16_to_cpu(config->maxpwsize); + maxobjsize = be16_to_cpu(config->maxobjsize); + maxobjlabelsize = be16_to_cpu(config->maxobjlabelsize); + totalsize = be32_to_cpu(config->totalsize); + usedspace = be32_to_cpu(config->usedspace); + supportedpolicies = be32_to_cpu(config->supportedpolicies); + maxlargeobjectsize = be32_to_cpu(config->maxlargeobjectsize); + signedupdatealgorithms = be64_to_cpu(config->signedupdatealgorithms); + + // Validate that the numbers we get back match the requirements of the spec + if (maxpwsize < 32) { + pr_err("Invalid Max Password Size received from hypervisor (%d < 32)\n", maxpwsize); + rc = -EIO; + goto err; + } + + if (maxobjlabelsize < 255) { + pr_err("Invalid Max Object Label Size received from hypervisor (%d < 255)\n", + maxobjlabelsize); + rc = -EIO; + goto err; + } + + if (totalsize < 4096) { + pr_err("Invalid Total Size received from hypervisor (%d < 4096)\n", totalsize); + rc = -EIO; + goto err; + } + + if (version >= 3 && maxlargeobjectsize >= 65536 && maxobjsize != 0xFFFF) { + pr_err("Invalid Max Object Size (0x%x != 0xFFFF)\n", maxobjsize); + rc = -EIO; + goto err; + } + +err: + kfree(config); + return rc; +} + +u8 plpks_get_version(void) +{ + return version; +} + +u16 plpks_get_objoverhead(void) +{ + return objoverhead; +} + +u16 plpks_get_maxpwsize(void) +{ + return maxpwsize; +} + +u16 plpks_get_maxobjectsize(void) +{ + return maxobjsize; +} + +u16 plpks_get_maxobjectlabelsize(void) +{ + return maxobjlabelsize; +} + +u32 plpks_get_totalsize(void) +{ + return totalsize; +} + +u32 plpks_get_usedspace(void) +{ + // Unlike other config values, usedspace regularly changes as objects + // are updated, so we need to refresh. 
+ int rc = _plpks_get_config(); + if (rc) { + pr_err("Couldn't get config, rc: %d\n", rc); + return 0; + } + return usedspace; +} + +u32 plpks_get_supportedpolicies(void) +{ + return supportedpolicies; +} + +u32 plpks_get_maxlargeobjectsize(void) +{ + return maxlargeobjectsize; +} + +u64 plpks_get_signedupdatealgorithms(void) +{ + return signedupdatealgorithms; +} + +u16 plpks_get_passwordlen(void) +{ + return ospasswordlength; +} + +bool plpks_is_available(void) +{ + int rc; + + if (!firmware_has_feature(FW_FEATURE_PLPKS)) + return false; + + rc = _plpks_get_config(); + if (rc) + return false; + + return true; +} + +static int plpks_confirm_object_flushed(struct label *label, + struct plpks_auth *auth) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 }; + bool timed_out = true; + u64 timeout = 0; + u8 status; + int rc; + + do { + rc = plpar_hcall(H_PKS_CONFIRM_OBJECT_FLUSHED, retbuf, + virt_to_phys(auth), virt_to_phys(label), + label->size); + + status = retbuf[0]; + if (rc) { + timed_out = false; + if (rc == H_NOT_FOUND && status == 1) + rc = 0; + break; + } + + if (!rc && status == 1) { + timed_out = false; + break; + } + + fsleep(PLPKS_FLUSH_SLEEP); + timeout = timeout + PLPKS_FLUSH_SLEEP; + } while (timeout < PLPKS_MAX_TIMEOUT); + + if (timed_out) + return -ETIMEDOUT; + + return pseries_status_to_err(rc); +} + +int plpks_signed_update_var(struct plpks_var *var, u64 flags) +{ + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; + int rc; + struct label *label; + struct plpks_auth *auth; + u64 continuetoken = 0; + u64 timeout = 0; + + if (!var->data || var->datalen <= 0 || var->namelen > PLPKS_MAX_NAME_SIZE) + return -EINVAL; + + if (!(var->policy & PLPKS_SIGNEDUPDATE)) + return -EINVAL; + + // Signed updates need the component to be NULL. + if (var->component) + return -EINVAL; + + auth = construct_auth(PLPKS_OS_OWNER); + if (IS_ERR(auth)) + return PTR_ERR(auth); + + label = construct_label(var->component, var->os, var->name, var->namelen); + if (IS_ERR(label)) { + rc = PTR_ERR(label); + goto out; + } + + do { + rc = plpar_hcall9(H_PKS_SIGNED_UPDATE, retbuf, + virt_to_phys(auth), virt_to_phys(label), + label->size, var->policy, flags, + virt_to_phys(var->data), var->datalen, + continuetoken); + + continuetoken = retbuf[0]; + if (pseries_status_to_err(rc) == -EBUSY) { + int delay_us = get_longbusy_msecs(rc) * 1000; + + fsleep(delay_us); + timeout += delay_us; + } + rc = pseries_status_to_err(rc); + } while (rc == -EBUSY && timeout < PLPKS_MAX_TIMEOUT); + + if (!rc) + rc = plpks_confirm_object_flushed(label, auth); + + kfree(label); +out: + kfree(auth); + + return rc; +} + +int plpks_write_var(struct plpks_var var) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 }; + struct plpks_auth *auth; + struct label *label; + int rc; + + if (!var.component || !var.data || var.datalen <= 0 || + var.namelen > PLPKS_MAX_NAME_SIZE || var.datalen > PLPKS_MAX_DATA_SIZE) + return -EINVAL; + + if (var.policy & PLPKS_SIGNEDUPDATE) + return -EINVAL; + + auth = construct_auth(PLPKS_OS_OWNER); + if (IS_ERR(auth)) + return PTR_ERR(auth); + + label = construct_label(var.component, var.os, var.name, var.namelen); + if (IS_ERR(label)) { + rc = PTR_ERR(label); + goto out; + } + + rc = plpar_hcall(H_PKS_WRITE_OBJECT, retbuf, virt_to_phys(auth), + virt_to_phys(label), label->size, var.policy, + virt_to_phys(var.data), var.datalen); + + if (!rc) + rc = plpks_confirm_object_flushed(label, auth); + + rc = pseries_status_to_err(rc); + kfree(label); +out: + kfree(auth); + + return rc; +} + +int 
plpks_remove_var(char *component, u8 varos, struct plpks_var_name vname) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 }; + struct plpks_auth *auth; + struct label *label; + int rc; + + if (vname.namelen > PLPKS_MAX_NAME_SIZE) + return -EINVAL; + + auth = construct_auth(PLPKS_OS_OWNER); + if (IS_ERR(auth)) + return PTR_ERR(auth); + + label = construct_label(component, varos, vname.name, vname.namelen); + if (IS_ERR(label)) { + rc = PTR_ERR(label); + goto out; + } + + rc = plpar_hcall(H_PKS_REMOVE_OBJECT, retbuf, virt_to_phys(auth), + virt_to_phys(label), label->size); + + if (!rc) + rc = plpks_confirm_object_flushed(label, auth); + + rc = pseries_status_to_err(rc); + kfree(label); +out: + kfree(auth); + + return rc; +} + +static int plpks_read_var(u8 consumer, struct plpks_var *var) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 }; + struct plpks_auth *auth; + struct label *label = NULL; + u8 *output; + int rc; + + if (var->namelen > PLPKS_MAX_NAME_SIZE) + return -EINVAL; + + auth = construct_auth(consumer); + if (IS_ERR(auth)) + return PTR_ERR(auth); + + if (consumer == PLPKS_OS_OWNER) { + label = construct_label(var->component, var->os, var->name, + var->namelen); + if (IS_ERR(label)) { + rc = PTR_ERR(label); + goto out_free_auth; + } + } + + output = kzalloc(maxobjsize, GFP_KERNEL); + if (!output) { + rc = -ENOMEM; + goto out_free_label; + } + + if (consumer == PLPKS_OS_OWNER) + rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth), + virt_to_phys(label), label->size, virt_to_phys(output), + maxobjsize); + else + rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth), + virt_to_phys(var->name), var->namelen, virt_to_phys(output), + maxobjsize); + + + if (rc != H_SUCCESS) { + rc = pseries_status_to_err(rc); + goto out_free_output; + } + + if (!var->data || var->datalen > retbuf[0]) + var->datalen = retbuf[0]; + + var->policy = retbuf[1]; + + if (var->data) + memcpy(var->data, output, var->datalen); + + rc = 0; + +out_free_output: + kfree(output); +out_free_label: + kfree(label); +out_free_auth: + kfree(auth); + + return rc; +} + +int plpks_read_os_var(struct plpks_var *var) +{ + return plpks_read_var(PLPKS_OS_OWNER, var); +} + +int plpks_read_fw_var(struct plpks_var *var) +{ + return plpks_read_var(PLPKS_FW_OWNER, var); +} + +int plpks_read_bootloader_var(struct plpks_var *var) +{ + return plpks_read_var(PLPKS_BOOTLOADER_OWNER, var); +} + +int plpks_populate_fdt(void *fdt) +{ + int chosen_offset = fdt_path_offset(fdt, "/chosen"); + + if (chosen_offset < 0) { + pr_err("Can't find chosen node: %s\n", + fdt_strerror(chosen_offset)); + return chosen_offset; + } + + return fdt_setprop(fdt, chosen_offset, "ibm,plpks-pw", ospassword, ospasswordlength); +} + +// Once a password is registered with the hypervisor it cannot be cleared without +// rebooting the LPAR, so to keep using the PLPKS across kexec boots we need to +// recover the previous password from the FDT. +// +// There are a few challenges here. We don't want the password to be visible to +// users, so we need to clear it from the FDT. This has to be done in early boot. +// Clearing it from the FDT would make the FDT's checksum invalid, so we have to +// manually cause the checksum to be recalculated. 
+void __init plpks_early_init_devtree(void) +{ + void *fdt = initial_boot_params; + int chosen_node = fdt_path_offset(fdt, "/chosen"); + const u8 *password; + int len; + + if (chosen_node < 0) + return; + + password = fdt_getprop(fdt, chosen_node, "ibm,plpks-pw", &len); + if (len <= 0) { + pr_debug("Couldn't find ibm,plpks-pw node.\n"); + return; + } + + ospassword = memblock_alloc_raw(len, SMP_CACHE_BYTES); + if (!ospassword) { + pr_err("Error allocating memory for password.\n"); + goto out; + } + + memcpy(ospassword, password, len); + ospasswordlength = (u16)len; + +out: + fdt_nop_property(fdt, chosen_node, "ibm,plpks-pw"); + // Since we've cleared the password, we must update the FDT checksum + early_init_dt_verify(fdt, __pa(fdt)); +} + +static __init int pseries_plpks_init(void) +{ + int rc; + + if (!firmware_has_feature(FW_FEATURE_PLPKS)) + return -ENODEV; + + rc = _plpks_get_config(); + + if (rc) { + pr_err("POWER LPAR Platform KeyStore is not supported or enabled\n"); + return rc; + } + + rc = plpks_gen_password(); + if (rc) + pr_err("Failed setting POWER LPAR Platform KeyStore Password\n"); + else + pr_info("POWER LPAR Platform KeyStore initialized successfully\n"); + + return rc; +} +machine_arch_initcall(pseries, pseries_plpks_init); diff --git a/arch/powerpc/platforms/pseries/plpks_sed_ops.c b/arch/powerpc/platforms/pseries/plpks_sed_ops.c new file mode 100644 index 000000000000..7c873c9589ef --- /dev/null +++ b/arch/powerpc/platforms/pseries/plpks_sed_ops.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * POWER Platform specific code for non-volatile SED key access + * Copyright (C) 2022 IBM Corporation + * + * Define operations for SED Opal to read/write keys + * from POWER LPAR Platform KeyStore(PLPKS). + * + * Self Encrypting Drives(SED) key storage using PLPKS + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/ioctl.h> +#include <linux/sed-opal-key.h> +#include <asm/plpks.h> + +static bool plpks_sed_initialized = false; +static bool plpks_sed_available = false; + +/* + * structure that contains all SED data + */ +struct plpks_sed_object_data { + u_char version; + u_char pad1[7]; + u_long authority; + u_long range; + u_int key_len; + u_char key[32]; +}; + +#define PLPKS_SED_OBJECT_DATA_V0 0 +#define PLPKS_SED_MANGLED_LABEL "/default/pri" +#define PLPKS_SED_COMPONENT "sed-opal" +#define PLPKS_SED_KEY "opal-boot-pin" + +/* + * authority is admin1 and range is global + */ +#define PLPKS_SED_AUTHORITY 0x0000000900010001 +#define PLPKS_SED_RANGE 0x0000080200000001 + +static void plpks_init_var(struct plpks_var *var, char *keyname) +{ + if (!plpks_sed_initialized) { + plpks_sed_initialized = true; + plpks_sed_available = plpks_is_available(); + if (!plpks_sed_available) + pr_err("SED: plpks not available\n"); + } + + var->name = keyname; + var->namelen = strlen(keyname); + if (strcmp(PLPKS_SED_KEY, keyname) == 0) { + var->name = PLPKS_SED_MANGLED_LABEL; + var->namelen = strlen(keyname); + } + var->policy = PLPKS_WORLDREADABLE; + var->os = PLPKS_VAR_COMMON; + var->data = NULL; + var->datalen = 0; + var->component = PLPKS_SED_COMPONENT; +} + +/* + * Read the SED Opal key from PLPKS given the label + */ +int sed_read_key(char *keyname, char *key, u_int *keylen) +{ + struct plpks_var var; + struct plpks_sed_object_data data; + int ret; + u_int len; + + plpks_init_var(&var, keyname); + + if (!plpks_sed_available) + return -EOPNOTSUPP; + + var.data = (u8 *)&data; + var.datalen = sizeof(data); + + ret = 
plpks_read_os_var(&var); + if (ret != 0) + return ret; + + len = min_t(u16, be32_to_cpu(data.key_len), var.datalen); + memcpy(key, data.key, len); + key[len] = '\0'; + *keylen = len; + + return 0; +} + +/* + * Write the SED Opal key to PLPKS given the label + */ +int sed_write_key(char *keyname, char *key, u_int keylen) +{ + struct plpks_var var; + struct plpks_sed_object_data data; + struct plpks_var_name vname; + + plpks_init_var(&var, keyname); + + if (!plpks_sed_available) + return -EOPNOTSUPP; + + var.datalen = sizeof(struct plpks_sed_object_data); + var.data = (u8 *)&data; + + /* initialize SED object */ + data.version = PLPKS_SED_OBJECT_DATA_V0; + data.authority = cpu_to_be64(PLPKS_SED_AUTHORITY); + data.range = cpu_to_be64(PLPKS_SED_RANGE); + memset(&data.pad1, '\0', sizeof(data.pad1)); + data.key_len = cpu_to_be32(keylen); + memcpy(data.key, (char *)key, keylen); + + /* + * Key update requires remove first. The return value + * is ignored since it's okay if the key doesn't exist. + */ + vname.namelen = var.namelen; + vname.name = var.name; + plpks_remove_var(var.component, var.os, vname); + + return plpks_write_var(var); +} diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c index 3c290b9ed01b..0f1d45f32e4a 100644 --- a/arch/powerpc/platforms/pseries/pmem.c +++ b/arch/powerpc/platforms/pseries/pmem.c @@ -121,7 +121,7 @@ int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog) return -EINVAL; } - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); lock_device_hotplug(); diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 1d75b7742ef0..3968a6970fa8 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -38,7 +38,6 @@ static inline void smp_init_pseries(void) { } #endif extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary); -void pseries_machine_kexec(struct kimage *image); extern void pSeries_final_fixup(void); @@ -55,6 +54,7 @@ extern int dlpar_detach_node(struct device_node *); extern int dlpar_acquire_drc(u32 drc_index); extern int dlpar_release_drc(u32 drc_index); extern int dlpar_unisolate_drc(u32 drc_index); +extern void post_mobility_fixup(void); void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog); int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_errlog); @@ -75,11 +75,13 @@ static inline int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog) #ifdef CONFIG_HOTPLUG_CPU int dlpar_cpu(struct pseries_hp_errorlog *hp_elog); +void pseries_cpu_hotplug_init(void); #else static inline int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) { return -EOPNOTSUPP; } +static inline void pseries_cpu_hotplug_init(void) { } #endif /* PCI root bridge prepare function override for pseries */ @@ -90,8 +92,6 @@ extern struct pci_controller_ops pseries_pci_controller_ops; int pseries_msi_allocate_domains(struct pci_controller *phb); void pseries_msi_free_domains(struct pci_controller *phb); -unsigned long pseries_memory_block_size(void); - extern int CMO_PrPSP; extern int CMO_SecPSP; extern unsigned long CMO_PageSize; @@ -123,5 +123,9 @@ static inline void pseries_lpar_read_hblkrm_characteristics(void) { } #endif void pseries_rng_init(void); +#ifdef CONFIG_SPAPR_TCE_IOMMU +struct iommu_group *pSeries_pci_device_group(struct pci_controller *hose, + struct pci_dev *pdev); +#endif #endif /* _PSERIES_PSERIES_H */ diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c 
b/arch/powerpc/platforms/pseries/pseries_energy.c index 09e98d301db0..2c661b798235 100644 --- a/arch/powerpc/platforms/pseries/pseries_energy.c +++ b/arch/powerpc/platforms/pseries/pseries_energy.c @@ -300,20 +300,22 @@ static struct device_attribute attr_percpu_deactivate_hint = static int __init pseries_energy_init(void) { int cpu, err; - struct device *cpu_dev; + struct device *cpu_dev, *dev_root; if (!firmware_has_feature(FW_FEATURE_BEST_ENERGY)) return 0; /* H_BEST_ENERGY hcall not supported */ /* Create the sysfs files */ - err = device_create_file(cpu_subsys.dev_root, - &attr_cpu_activate_hint_list); - if (!err) - err = device_create_file(cpu_subsys.dev_root, - &attr_cpu_deactivate_hint_list); + dev_root = bus_get_dev_root(&cpu_subsys); + if (dev_root) { + err = device_create_file(dev_root, &attr_cpu_activate_hint_list); + if (!err) + err = device_create_file(dev_root, &attr_cpu_deactivate_hint_list); + put_device(dev_root); + if (err) + return err; + } - if (err) - return err; for_each_possible_cpu(cpu) { cpu_dev = get_cpu_device(cpu); err = device_create_file(cpu_dev, @@ -337,14 +339,18 @@ static int __init pseries_energy_init(void) static void __exit pseries_energy_cleanup(void) { int cpu; - struct device *cpu_dev; + struct device *cpu_dev, *dev_root; if (!sysfs_entries) return; /* Remove the sysfs files */ - device_remove_file(cpu_subsys.dev_root, &attr_cpu_activate_hint_list); - device_remove_file(cpu_subsys.dev_root, &attr_cpu_deactivate_hint_list); + dev_root = bus_get_dev_root(&cpu_subsys); + if (dev_root) { + device_remove_file(dev_root, &attr_cpu_activate_hint_list); + device_remove_file(dev_root, &attr_cpu_deactivate_hint_list); + put_device(dev_root); + } for_each_possible_cpu(cpu) { cpu_dev = get_cpu_device(cpu); diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index f12516c3998c..adafd593d9d3 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -155,7 +155,7 @@ static int __init init_ras_IRQ(void) { struct device_node *np; - ras_check_exception_token = rtas_token("check-exception"); + ras_check_exception_token = rtas_function_token(RTAS_FN_CHECK_EXCEPTION); /* Internal Errors */ np = of_find_node_by_path("/event-sources/internal-errors"); diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index cad7a0c93117..599bd2c78514 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/notifier.h> #include <linux/proc_fs.h> +#include <linux/security.h> #include <linux/slab.h> #include <linux/of.h> @@ -361,6 +362,10 @@ static ssize_t ofdt_write(struct file *file, const char __user *buf, size_t coun char *kbuf; char *tmp; + rv = security_locked_down(LOCKDOWN_DEVICE_TREE); + if (rv) + return rv; + kbuf = memdup_user_nul(buf, count); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c index b5853e9fcc3c..eceb3289383e 100644 --- a/arch/powerpc/platforms/pseries/rtas-fadump.c +++ b/arch/powerpc/platforms/pseries/rtas-fadump.c @@ -18,6 +18,7 @@ #include <asm/page.h> #include <asm/rtas.h> +#include <asm/setup.h> #include <asm/fadump.h> #include <asm/fadump-internal.h> @@ -29,9 +30,6 @@ static const struct rtas_fadump_mem_struct *fdm_active; static void rtas_fadump_update_config(struct fw_dump *fadump_conf, const struct rtas_fadump_mem_struct *fdm) { - 
fadump_conf->boot_mem_dest_addr = - be64_to_cpu(fdm->rmr_region.destination_address); - fadump_conf->fadumphdr_addr = (fadump_conf->boot_mem_dest_addr + fadump_conf->boot_memory_size); } @@ -43,20 +41,56 @@ static void rtas_fadump_update_config(struct fw_dump *fadump_conf, static void __init rtas_fadump_get_config(struct fw_dump *fadump_conf, const struct rtas_fadump_mem_struct *fdm) { - fadump_conf->boot_mem_addr[0] = - be64_to_cpu(fdm->rmr_region.source_address); - fadump_conf->boot_mem_sz[0] = be64_to_cpu(fdm->rmr_region.source_len); - fadump_conf->boot_memory_size = fadump_conf->boot_mem_sz[0]; + unsigned long base, size, last_end, hole_size; - fadump_conf->boot_mem_top = fadump_conf->boot_memory_size; - fadump_conf->boot_mem_regs_cnt = 1; + last_end = 0; + hole_size = 0; + fadump_conf->boot_memory_size = 0; + fadump_conf->boot_mem_regs_cnt = 0; + pr_debug("Boot memory regions:\n"); + for (int i = 0; i < be16_to_cpu(fdm->header.dump_num_sections); i++) { + int type = be16_to_cpu(fdm->rgn[i].source_data_type); + u64 addr; - /* - * Start address of reserve dump area (permanent reservation) for - * re-registering FADump after dump capture. - */ - fadump_conf->reserve_dump_area_start = - be64_to_cpu(fdm->cpu_state_data.destination_address); + switch (type) { + case RTAS_FADUMP_CPU_STATE_DATA: + addr = be64_to_cpu(fdm->rgn[i].destination_address); + + fadump_conf->cpu_state_dest_vaddr = (u64)__va(addr); + /* + * Start address of reserve dump area (permanent reservation) for + * re-registering FADump after dump capture. + */ + fadump_conf->reserve_dump_area_start = addr; + break; + case RTAS_FADUMP_HPTE_REGION: + /* Not processed currently. */ + break; + case RTAS_FADUMP_REAL_MODE_REGION: + base = be64_to_cpu(fdm->rgn[i].source_address); + size = be64_to_cpu(fdm->rgn[i].source_len); + pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size); + if (!base) { + fadump_conf->boot_mem_dest_addr = + be64_to_cpu(fdm->rgn[i].destination_address); + } + + fadump_conf->boot_mem_addr[fadump_conf->boot_mem_regs_cnt] = base; + fadump_conf->boot_mem_sz[fadump_conf->boot_mem_regs_cnt] = size; + fadump_conf->boot_memory_size += size; + hole_size += (base - last_end); + last_end = base + size; + fadump_conf->boot_mem_regs_cnt++; + break; + case RTAS_FADUMP_PARAM_AREA: + fadump_conf->param_area = be64_to_cpu(fdm->rgn[i].destination_address); + break; + default: + pr_warn("Section type %d unsupported on this kernel. Ignoring!\n", type); + break; + } + } + fadump_conf->boot_mem_top = fadump_conf->boot_memory_size + hole_size; rtas_fadump_update_config(fadump_conf, fdm); } @@ -64,16 +98,15 @@ static void __init rtas_fadump_get_config(struct fw_dump *fadump_conf, static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf) { u64 addr = fadump_conf->reserve_dump_area_start; + u16 sec_cnt = 0; memset(&fdm, 0, sizeof(struct rtas_fadump_mem_struct)); addr = addr & PAGE_MASK; fdm.header.dump_format_version = cpu_to_be32(0x00000001); - fdm.header.dump_num_sections = cpu_to_be16(3); fdm.header.dump_status_flag = 0; fdm.header.offset_first_dump_section = - cpu_to_be32((u32)offsetof(struct rtas_fadump_mem_struct, - cpu_state_data)); + cpu_to_be32((u32)offsetof(struct rtas_fadump_mem_struct, rgn)); /* * Fields for disk dump option. @@ -89,25 +122,22 @@ static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf) /* Kernel dump sections */ /* cpu state data section. 
*/ - fdm.cpu_state_data.request_flag = - cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); - fdm.cpu_state_data.source_data_type = - cpu_to_be16(RTAS_FADUMP_CPU_STATE_DATA); - fdm.cpu_state_data.source_address = 0; - fdm.cpu_state_data.source_len = - cpu_to_be64(fadump_conf->cpu_state_data_size); - fdm.cpu_state_data.destination_address = cpu_to_be64(addr); + fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_CPU_STATE_DATA); + fdm.rgn[sec_cnt].source_address = 0; + fdm.rgn[sec_cnt].source_len = cpu_to_be64(fadump_conf->cpu_state_data_size); + fdm.rgn[sec_cnt].destination_address = cpu_to_be64(addr); addr += fadump_conf->cpu_state_data_size; + sec_cnt++; /* hpte region section */ - fdm.hpte_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); - fdm.hpte_region.source_data_type = - cpu_to_be16(RTAS_FADUMP_HPTE_REGION); - fdm.hpte_region.source_address = 0; - fdm.hpte_region.source_len = - cpu_to_be64(fadump_conf->hpte_region_size); - fdm.hpte_region.destination_address = cpu_to_be64(addr); + fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_HPTE_REGION); + fdm.rgn[sec_cnt].source_address = 0; + fdm.rgn[sec_cnt].source_len = cpu_to_be64(fadump_conf->hpte_region_size); + fdm.rgn[sec_cnt].destination_address = cpu_to_be64(addr); addr += fadump_conf->hpte_region_size; + sec_cnt++; /* * Align boot memory area destination address to page boundary to @@ -115,14 +145,29 @@ static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf) */ addr = PAGE_ALIGN(addr); - /* RMA region section */ - fdm.rmr_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); - fdm.rmr_region.source_data_type = - cpu_to_be16(RTAS_FADUMP_REAL_MODE_REGION); - fdm.rmr_region.source_address = cpu_to_be64(0); - fdm.rmr_region.source_len = cpu_to_be64(fadump_conf->boot_memory_size); - fdm.rmr_region.destination_address = cpu_to_be64(addr); - addr += fadump_conf->boot_memory_size; + /* First boot memory region destination address */ + fadump_conf->boot_mem_dest_addr = addr; + for (int i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) { + /* Boot memory regions */ + fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_REAL_MODE_REGION); + fdm.rgn[sec_cnt].source_address = cpu_to_be64(fadump_conf->boot_mem_addr[i]); + fdm.rgn[sec_cnt].source_len = cpu_to_be64(fadump_conf->boot_mem_sz[i]); + fdm.rgn[sec_cnt].destination_address = cpu_to_be64(addr); + addr += fadump_conf->boot_mem_sz[i]; + sec_cnt++; + } + + /* Parameters area */ + if (fadump_conf->param_area) { + fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_PARAM_AREA); + fdm.rgn[sec_cnt].source_address = cpu_to_be64(fadump_conf->param_area); + fdm.rgn[sec_cnt].source_len = cpu_to_be64(COMMAND_LINE_SIZE); + fdm.rgn[sec_cnt].destination_address = cpu_to_be64(fadump_conf->param_area); + sec_cnt++; + } + fdm.header.dump_num_sections = cpu_to_be16(sec_cnt); rtas_fadump_update_config(fadump_conf, &fdm); @@ -136,14 +181,21 @@ static u64 rtas_fadump_get_bootmem_min(void) static int rtas_fadump_register(struct fw_dump *fadump_conf) { - unsigned int wait_time; + unsigned int wait_time, fdm_size; int rc, err = -EIO; + /* + * Platform requires the exact size of the Dump Memory Structure. 
+ * Avoid including any unused rgns in the calculation, as this + * could result in a parameter error (-3) from the platform. + */ + fdm_size = sizeof(struct rtas_fadump_section_header); + fdm_size += be16_to_cpu(fdm.header.dump_num_sections) * sizeof(struct rtas_fadump_section); + /* TODO: Add upper time limit for the delay */ do { rc = rtas_call(fadump_conf->ibm_configure_kernel_dump, 3, 1, - NULL, FADUMP_REGISTER, &fdm, - sizeof(struct rtas_fadump_mem_struct)); + NULL, FADUMP_REGISTER, &fdm, fdm_size); wait_time = rtas_busy_delay_time(rc); if (wait_time) @@ -161,9 +213,7 @@ static int rtas_fadump_register(struct fw_dump *fadump_conf) pr_err("Failed to register. Hardware Error(%d).\n", rc); break; case -3: - if (!is_fadump_boot_mem_contiguous()) - pr_err("Can't have holes in boot memory area.\n"); - else if (!is_fadump_reserved_mem_contiguous()) + if (!is_fadump_reserved_mem_contiguous()) pr_err("Can't have holes in reserved memory area.\n"); pr_err("Failed to register. Parameter Error(%d).\n", rc); @@ -316,11 +366,9 @@ static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf) u32 num_cpus, *note_buf; int i, rc = 0, cpu = 0; struct pt_regs regs; - unsigned long addr; void *vaddr; - addr = be64_to_cpu(fdm_active->cpu_state_data.destination_address); - vaddr = __va(addr); + vaddr = (void *)fadump_conf->cpu_state_dest_vaddr; reg_header = vaddr; if (be64_to_cpu(reg_header->magic_number) != @@ -375,11 +423,8 @@ static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf) } final_note(note_buf); - if (fdh) { - pr_debug("Updating elfcore header (%llx) with cpu notes\n", - fdh->elfcorehdr_addr); - fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr)); - } + pr_debug("Updating elfcore header (%llx) with cpu notes\n", fadump_conf->elfcorehdr_addr); + fadump_update_elfcore_header((char *)fadump_conf->elfcorehdr_addr); return 0; error_out: @@ -389,57 +434,66 @@ error_out: } /* - * Validate and process the dump data stored by firmware before exporting - * it through '/proc/vmcore'. + * Validate and process the dump data stored by the firmware, and update + * the CPU notes of elfcorehdr. */ static int __init rtas_fadump_process(struct fw_dump *fadump_conf) { - struct fadump_crash_info_header *fdh; - int rc = 0; - if (!fdm_active || !fadump_conf->fadumphdr_addr) return -EINVAL; /* Check if the dump data is valid. 
*/ - if ((be16_to_cpu(fdm_active->header.dump_status_flag) == - RTAS_FADUMP_ERROR_FLAG) || - (fdm_active->cpu_state_data.error_flags != 0) || - (fdm_active->rmr_region.error_flags != 0)) { - pr_err("Dump taken by platform is not valid\n"); - return -EINVAL; - } - if ((fdm_active->rmr_region.bytes_dumped != - fdm_active->rmr_region.source_len) || - !fdm_active->cpu_state_data.bytes_dumped) { - pr_err("Dump taken by platform is incomplete\n"); - return -EINVAL; - } + for (int i = 0; i < be16_to_cpu(fdm_active->header.dump_num_sections); i++) { + int type = be16_to_cpu(fdm_active->rgn[i].source_data_type); + int rc = 0; - /* Validate the fadump crash info header */ - fdh = __va(fadump_conf->fadumphdr_addr); - if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) { - pr_err("Crash info header is not valid.\n"); - return -EINVAL; + switch (type) { + case RTAS_FADUMP_CPU_STATE_DATA: + case RTAS_FADUMP_HPTE_REGION: + case RTAS_FADUMP_REAL_MODE_REGION: + if (fdm_active->rgn[i].error_flags != 0) { + pr_err("Dump taken by platform is not valid (%d)\n", i); + rc = -EINVAL; + } + if (fdm_active->rgn[i].bytes_dumped != fdm_active->rgn[i].source_len) { + pr_err("Dump taken by platform is incomplete (%d)\n", i); + rc = -EINVAL; + } + if (rc) { + pr_warn("Region type: %u src addr: 0x%llx dest addr: 0x%llx\n", + be16_to_cpu(fdm_active->rgn[i].source_data_type), + be64_to_cpu(fdm_active->rgn[i].source_address), + be64_to_cpu(fdm_active->rgn[i].destination_address)); + return rc; + } + break; + case RTAS_FADUMP_PARAM_AREA: + if (fdm_active->rgn[i].bytes_dumped != fdm_active->rgn[i].source_len || + fdm_active->rgn[i].error_flags != 0) { + pr_warn("Failed to process additional parameters! Proceeding anyway..\n"); + fadump_conf->param_area = 0; + } + break; + default: + /* + * If the first/crashed kernel added a new region type that the + * second/fadump kernel doesn't recognize, skip it and process + * assuming backward compatibility. + */ + pr_warn("Unknown region found: type: %u src addr: 0x%llx dest addr: 0x%llx\n", + be16_to_cpu(fdm_active->rgn[i].source_data_type), + be64_to_cpu(fdm_active->rgn[i].source_address), + be64_to_cpu(fdm_active->rgn[i].destination_address)); + break; + } } - rc = rtas_fadump_build_cpu_notes(fadump_conf); - if (rc) - return rc; - - /* - * We are done validating dump info and elfcore header is now ready - * to be exported. set elfcorehdr_addr so that vmcore module will - * export the elfcore header through '/proc/vmcore'. 
- */ - elfcorehdr_addr = fdh->elfcorehdr_addr; - - return 0; + return rtas_fadump_build_cpu_notes(fadump_conf); } static void rtas_fadump_region_show(struct fw_dump *fadump_conf, struct seq_file *m) { - const struct rtas_fadump_section *cpu_data_section; const struct rtas_fadump_mem_struct *fdm_ptr; if (fdm_active) @@ -447,27 +501,49 @@ static void rtas_fadump_region_show(struct fw_dump *fadump_conf, else fdm_ptr = &fdm; - cpu_data_section = &(fdm_ptr->cpu_state_data); - seq_printf(m, "CPU :[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", - be64_to_cpu(cpu_data_section->destination_address), - be64_to_cpu(cpu_data_section->destination_address) + - be64_to_cpu(cpu_data_section->source_len) - 1, - be64_to_cpu(cpu_data_section->source_len), - be64_to_cpu(cpu_data_section->bytes_dumped)); - - seq_printf(m, "HPTE:[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", - be64_to_cpu(fdm_ptr->hpte_region.destination_address), - be64_to_cpu(fdm_ptr->hpte_region.destination_address) + - be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1, - be64_to_cpu(fdm_ptr->hpte_region.source_len), - be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped)); - - seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ", - be64_to_cpu(fdm_ptr->rmr_region.source_address), - be64_to_cpu(fdm_ptr->rmr_region.destination_address)); - seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n", - be64_to_cpu(fdm_ptr->rmr_region.source_len), - be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped)); + + for (int i = 0; i < be16_to_cpu(fdm_ptr->header.dump_num_sections); i++) { + int type = be16_to_cpu(fdm_ptr->rgn[i].source_data_type); + + switch (type) { + case RTAS_FADUMP_CPU_STATE_DATA: + seq_printf(m, "CPU :[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", + be64_to_cpu(fdm_ptr->rgn[i].destination_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address) + + be64_to_cpu(fdm_ptr->rgn[i].source_len) - 1, + be64_to_cpu(fdm_ptr->rgn[i].source_len), + be64_to_cpu(fdm_ptr->rgn[i].bytes_dumped)); + break; + case RTAS_FADUMP_HPTE_REGION: + seq_printf(m, "HPTE:[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", + be64_to_cpu(fdm_ptr->rgn[i].destination_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address) + + be64_to_cpu(fdm_ptr->rgn[i].source_len) - 1, + be64_to_cpu(fdm_ptr->rgn[i].source_len), + be64_to_cpu(fdm_ptr->rgn[i].bytes_dumped)); + break; + case RTAS_FADUMP_REAL_MODE_REGION: + seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ", + be64_to_cpu(fdm_ptr->rgn[i].source_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address)); + seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n", + be64_to_cpu(fdm_ptr->rgn[i].source_len), + be64_to_cpu(fdm_ptr->rgn[i].bytes_dumped)); + break; + case RTAS_FADUMP_PARAM_AREA: + seq_printf(m, "\n[%#016llx-%#016llx]: cmdline append: '%s'\n", + be64_to_cpu(fdm_ptr->rgn[i].destination_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address) + + be64_to_cpu(fdm_ptr->rgn[i].source_len) - 1, + (char *)__va(be64_to_cpu(fdm_ptr->rgn[i].destination_address))); + break; + default: + seq_printf(m, "Unknown region type %d : Src: %#016llx, Dest: %#016llx, ", + type, be64_to_cpu(fdm_ptr->rgn[i].source_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address)); + break; + } + } /* Dump is active. Show preserved area start address. 
*/ if (fdm_active) { @@ -483,6 +559,20 @@ static void rtas_fadump_trigger(struct fadump_crash_info_header *fdh, rtas_os_term((char *)msg); } +/* FADUMP_MAX_MEM_REGS or lower */ +static int rtas_fadump_max_boot_mem_rgns(void) +{ + /* + * Version 1 of Kernel Assisted Dump Memory Structure (PAPR) supports 10 sections. + * With one section each taken for CPU state data & HPTE, 8 sections + * can be used for boot memory regions. + * + * If new region(s) are defined, the maximum number of boot memory regions will decrease + * proportionally. + */ + return RTAS_FADUMP_MAX_BOOT_MEM_REGS; +} + static struct fadump_ops rtas_fadump_ops = { .fadump_init_mem_struct = rtas_fadump_init_mem_struct, .fadump_get_bootmem_min = rtas_fadump_get_bootmem_min, @@ -492,6 +582,7 @@ static struct fadump_ops rtas_fadump_ops = { .fadump_process = rtas_fadump_process, .fadump_region_show = rtas_fadump_region_show, .fadump_trigger = rtas_fadump_trigger, + .fadump_max_boot_mem_rgns = rtas_fadump_max_boot_mem_rgns, }; void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) @@ -508,9 +599,10 @@ void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) if (!token) return; - fadump_conf->ibm_configure_kernel_dump = be32_to_cpu(*token); - fadump_conf->ops = &rtas_fadump_ops; - fadump_conf->fadump_supported = 1; + fadump_conf->ibm_configure_kernel_dump = be32_to_cpu(*token); + fadump_conf->ops = &rtas_fadump_ops; + fadump_conf->fadump_supported = 1; + fadump_conf->param_area_supported = 1; /* Firmware supports 64-bit value for size, align it to pagesize. */ fadump_conf->max_copy_size = ALIGN_DOWN(U64_MAX, PAGE_SIZE); diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.h b/arch/powerpc/platforms/pseries/rtas-fadump.h index fd59bd7ca9c3..c109abf6befd 100644 --- a/arch/powerpc/platforms/pseries/rtas-fadump.h +++ b/arch/powerpc/platforms/pseries/rtas-fadump.h @@ -23,12 +23,24 @@ #define RTAS_FADUMP_HPTE_REGION 0x0002 #define RTAS_FADUMP_REAL_MODE_REGION 0x0011 +/* OS defined sections */ +#define RTAS_FADUMP_PARAM_AREA 0x0100 + /* Dump request flag */ #define RTAS_FADUMP_REQUEST_FLAG 0x00000001 /* Dump status flag */ #define RTAS_FADUMP_ERROR_FLAG 0x2000 +/* + * The Firmware Assisted Dump Memory structure supports a maximum of 10 sections. + * Presently, three sections are used for + * CPU state data, HPTE & Parameters area, while the remaining seven sections + * can be used for boot memory regions. + */ +#define MAX_SECTIONS 10 +#define RTAS_FADUMP_MAX_BOOT_MEM_REGS 7 + /* Kernel Dump section info */ struct rtas_fadump_section { __be32 request_flag; @@ -61,20 +73,15 @@ struct rtas_fadump_section_header { * Firmware Assisted dump memory structure. This structure is required for * registering future kernel dump with power firmware through rtas call. * - * No disk dump option. Hence disk dump path string section is not included. + * In version 1, the platform permits one section header, dump-disk path + * and ten sections. + * + * Note: No disk dump option. Hence disk dump path string section is not + * included. */ struct rtas_fadump_mem_struct { struct rtas_fadump_section_header header; - - /* Kernel dump sections */ - struct rtas_fadump_section cpu_state_data; - struct rtas_fadump_section hpte_region; - - /* - * TODO: Extend multiple boot memory regions support in the kernel - * for this platform. 
- */ - struct rtas_fadump_section rmr_region; + struct rtas_fadump_section rgn[MAX_SECTIONS]; }; /* diff --git a/arch/powerpc/platforms/pseries/rtas-work-area.c b/arch/powerpc/platforms/pseries/rtas-work-area.c new file mode 100644 index 000000000000..7fe34bee84d8 --- /dev/null +++ b/arch/powerpc/platforms/pseries/rtas-work-area.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "rtas-work-area: " fmt + +#include <linux/genalloc.h> +#include <linux/log2.h> +#include <linux/kernel.h> +#include <linux/memblock.h> +#include <linux/mempool.h> +#include <linux/minmax.h> +#include <linux/mutex.h> +#include <linux/numa.h> +#include <linux/sizes.h> +#include <linux/wait.h> + +#include <asm/machdep.h> +#include <asm/rtas-work-area.h> +#include <asm/rtas.h> + +enum { + /* + * Ensure the pool is page-aligned. + */ + RTAS_WORK_AREA_ARENA_ALIGN = PAGE_SIZE, + /* + * Don't let a single allocation claim the whole arena. + */ + RTAS_WORK_AREA_ARENA_SZ = RTAS_WORK_AREA_MAX_ALLOC_SZ * 2, + /* + * The smallest known work area size is for ibm,get-vpd's + * location code argument, which is limited to 79 characters + * plus 1 nul terminator. + * + * PAPR+ 7.3.20 ibm,get-vpd RTAS Call + * PAPR+ 12.3.2.4 Converged Location Code Rules - Length Restrictions + */ + RTAS_WORK_AREA_MIN_ALLOC_SZ = roundup_pow_of_two(80), +}; + +static struct { + struct gen_pool *gen_pool; + char *arena; + struct mutex mutex; /* serializes allocations */ + struct wait_queue_head wqh; + mempool_t descriptor_pool; + bool available; +} rwa_state = { + .mutex = __MUTEX_INITIALIZER(rwa_state.mutex), + .wqh = __WAIT_QUEUE_HEAD_INITIALIZER(rwa_state.wqh), +}; + +/* + * A single work area buffer and descriptor to serve requests early in + * boot before the allocator is fully initialized. We know 4KB is the + * most any boot time user needs (they all call ibm,get-system-parameter). + */ +static bool early_work_area_in_use __initdata; +static char early_work_area_buf[SZ_4K] __initdata __aligned(SZ_4K); +static struct rtas_work_area early_work_area __initdata = { + .buf = early_work_area_buf, + .size = sizeof(early_work_area_buf), +}; + + +static struct rtas_work_area * __init rtas_work_area_alloc_early(size_t size) +{ + WARN_ON(size > early_work_area.size); + WARN_ON(early_work_area_in_use); + early_work_area_in_use = true; + memset(early_work_area.buf, 0, early_work_area.size); + return &early_work_area; +} + +static void __init rtas_work_area_free_early(struct rtas_work_area *work_area) +{ + WARN_ON(work_area != &early_work_area); + WARN_ON(!early_work_area_in_use); + early_work_area_in_use = false; +} + +struct rtas_work_area * __ref __rtas_work_area_alloc(size_t size) +{ + struct rtas_work_area *area; + unsigned long addr; + + might_sleep(); + + /* + * The rtas_work_area_alloc() wrapper enforces this at build + * time. Requests that exceed the arena size will block + * indefinitely. + */ + WARN_ON(size > RTAS_WORK_AREA_MAX_ALLOC_SZ); + + if (!rwa_state.available) + return rtas_work_area_alloc_early(size); + /* + * To ensure FCFS behavior and prevent a high rate of smaller + * requests from starving larger ones, use the mutex to queue + * allocations. 
+ */ + mutex_lock(&rwa_state.mutex); + wait_event(rwa_state.wqh, + (addr = gen_pool_alloc(rwa_state.gen_pool, size)) != 0); + mutex_unlock(&rwa_state.mutex); + + area = mempool_alloc(&rwa_state.descriptor_pool, GFP_KERNEL); + area->buf = (char *)addr; + area->size = size; + + return area; +} + +void __ref rtas_work_area_free(struct rtas_work_area *area) +{ + if (!rwa_state.available) { + rtas_work_area_free_early(area); + return; + } + + gen_pool_free(rwa_state.gen_pool, (unsigned long)area->buf, area->size); + mempool_free(area, &rwa_state.descriptor_pool); + wake_up(&rwa_state.wqh); +} + +/* + * Initialization of the work area allocator happens in two parts. To + * reliably reserve an arena that satisfies RTAS addressing + * requirements, we must perform a memblock allocation early, + * immediately after RTAS instantiation. Then we have to wait until + * the slab allocator is up before setting up the descriptor mempool + * and adding the arena to a gen_pool. + */ +static __init int rtas_work_area_allocator_init(void) +{ + const unsigned int order = ilog2(RTAS_WORK_AREA_MIN_ALLOC_SZ); + const phys_addr_t pa_start = __pa(rwa_state.arena); + const phys_addr_t pa_end = pa_start + RTAS_WORK_AREA_ARENA_SZ - 1; + struct gen_pool *pool; + const int nid = NUMA_NO_NODE; + int err; + + err = -ENOMEM; + if (!rwa_state.arena) + goto err_out; + + pool = gen_pool_create(order, nid); + if (!pool) + goto err_out; + /* + * All RTAS functions that consume work areas are OK with + * natural alignment, when they have alignment requirements at + * all. + */ + gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL); + + err = gen_pool_add(pool, (unsigned long)rwa_state.arena, + RTAS_WORK_AREA_ARENA_SZ, nid); + if (err) + goto err_destroy; + + err = mempool_init_kmalloc_pool(&rwa_state.descriptor_pool, 1, + sizeof(struct rtas_work_area)); + if (err) + goto err_destroy; + + rwa_state.gen_pool = pool; + rwa_state.available = true; + + pr_debug("arena [%pa-%pa] (%uK), min/max alloc sizes %u/%u\n", + &pa_start, &pa_end, + RTAS_WORK_AREA_ARENA_SZ / SZ_1K, + RTAS_WORK_AREA_MIN_ALLOC_SZ, + RTAS_WORK_AREA_MAX_ALLOC_SZ); + + return 0; + +err_destroy: + gen_pool_destroy(pool); +err_out: + return err; +} +machine_arch_initcall(pseries, rtas_work_area_allocator_init); + +/** + * rtas_work_area_reserve_arena() - Reserve memory suitable for RTAS work areas. + * @limit: Upper limit for memblock allocation. + */ +void __init rtas_work_area_reserve_arena(const phys_addr_t limit) +{ + const phys_addr_t align = RTAS_WORK_AREA_ARENA_ALIGN; + const phys_addr_t size = RTAS_WORK_AREA_ARENA_SZ; + const phys_addr_t min = MEMBLOCK_LOW_LIMIT; + const int nid = NUMA_NO_NODE; + + /* + * Too early for a machine_is(pseries) check. But PAPR + * effectively mandates that ibm,get-system-parameter is + * present: + * + * R1–7.3.16–1. All platforms must support the System + * Parameters option. + * + * So set up the arena if we find that, with a fallback to + * ibm,configure-connector, just in case. 
+ */ + if (rtas_function_implemented(RTAS_FN_IBM_GET_SYSTEM_PARAMETER) || + rtas_function_implemented(RTAS_FN_IBM_CONFIGURE_CONNECTOR)) + rwa_state.arena = memblock_alloc_try_nid(size, align, min, limit, nid); +} diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index ee4f1db49515..b10a25325238 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -14,6 +14,7 @@ #include <linux/cpu.h> #include <linux/errno.h> +#include <linux/platform_device.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> @@ -40,6 +41,7 @@ #include <linux/of_pci.h> #include <linux/memblock.h> #include <linux/swiotlb.h> +#include <linux/seq_buf.h> #include <asm/mmu.h> #include <asm/processor.h> @@ -55,6 +57,7 @@ #include <asm/pmc.h> #include <asm/xics.h> #include <asm/xive.h> +#include <asm/papr-sysparm.h> #include <asm/ppc-pci.h> #include <asm/i8259.h> #include <asm/udbg.h> @@ -72,12 +75,27 @@ #include <asm/svm.h> #include <asm/dtl.h> #include <asm/hvconsole.h> +#include <asm/setup.h> #include "pseries.h" DEFINE_STATIC_KEY_FALSE(shared_processor); EXPORT_SYMBOL(shared_processor); +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING +struct static_key paravirt_steal_enabled; +struct static_key paravirt_steal_rq_enabled; + +static bool steal_acc = true; +static int __init parse_no_stealacc(char *arg) +{ + steal_acc = false; + return 0; +} + +early_param("no-steal-acc", parse_no_stealacc); +#endif + int CMO_PrPSP = -1; int CMO_SecPSP = -1; unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K); @@ -118,11 +136,11 @@ static void __init fwnmi_init(void) #endif int ibm_nmi_register_token; - ibm_nmi_register_token = rtas_token("ibm,nmi-register"); + ibm_nmi_register_token = rtas_function_token(RTAS_FN_IBM_NMI_REGISTER); if (ibm_nmi_register_token == RTAS_UNKNOWN_SERVICE) return; - ibm_nmi_interlock_token = rtas_token("ibm,nmi-interlock"); + ibm_nmi_interlock_token = rtas_function_token(RTAS_FN_IBM_NMI_INTERLOCK); if (WARN_ON(ibm_nmi_interlock_token == RTAS_UNKNOWN_SERVICE)) return; @@ -169,6 +187,18 @@ static void __init fwnmi_init(void) #endif } +/* + * Affix a device for the first timer to the platform bus if + * we have firmware support for the H_WATCHDOG hypercall. 
+ */ +static __init int pseries_wdt_init(void) +{ + if (firmware_has_feature(FW_FEATURE_WATCHDOG)) + platform_device_register_simple("pseries-wdt", 0, NULL, 0); + return 0; +} +machine_subsys_initcall(pseries, pseries_wdt_init); + static void pseries_8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); @@ -313,8 +343,8 @@ static int alloc_dispatch_log_kmem_cache(void) { void (*ctor)(void *) = get_dtl_cache_ctor(); - dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, - DISPATCH_LOG_BYTES, 0, ctor); + dtl_cache = kmem_cache_create_usercopy("dtl", DISPATCH_LOG_BYTES, + DISPATCH_LOG_BYTES, 0, 0, DISPATCH_LOG_BYTES, ctor); if (!dtl_cache) { pr_warn("Failed to create dispatch trace log buffer cache\n"); pr_warn("Stolen time statistics will be unreliable\n"); @@ -786,6 +816,8 @@ static void __init pSeries_setup_arch(void) /* Discover PIC type and setup ppc_md accordingly */ smp_init_pseries(); + // Setup CPU hotplug callbacks + pseries_cpu_hotplug_init(); if (radix_enabled() && !mmu_has_feature(MMU_FTR_GTSE)) if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) @@ -802,9 +834,8 @@ static void __init pSeries_setup_arch(void) fwnmi_init(); pseries_setup_security_mitigations(); -#ifdef CONFIG_PPC_64S_HASH_MMU - pseries_lpar_read_hblkrm_characteristics(); -#endif + if (!radix_enabled()) + pseries_lpar_read_hblkrm_characteristics(); /* By default, only probe PCI (can be overridden by rtas_pci) */ pci_add_flags(PCI_PROBE_ONLY); @@ -818,9 +849,14 @@ static void __init pSeries_setup_arch(void) if (firmware_has_feature(FW_FEATURE_LPAR)) { vpa_init(boot_cpuid); - if (lppaca_shared_proc(get_lppaca())) { + if (lppaca_shared_proc()) { static_branch_enable(&shared_processor); pv_spinlocks_init(); +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING + static_key_slow_inc(¶virt_steal_enabled); + if (steal_acc) + static_key_slow_inc(¶virt_steal_rq_enabled); +#endif } ppc_md.power_save = pseries_lpar_idle; @@ -908,28 +944,21 @@ void pSeries_coalesce_init(void) */ static void __init pSeries_cmo_feature_init(void) { + static struct papr_sysparm_buf buf __initdata; + static_assert(sizeof(buf.val) >= CMO_MAXLENGTH); char *ptr, *key, *value, *end; - int call_status; int page_order = IOMMU_PAGE_SHIFT_4K; pr_debug(" -> fw_cmo_feature_init()\n"); - spin_lock(&rtas_data_buf_lock); - memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE); - call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, - NULL, - CMO_CHARACTERISTICS_TOKEN, - __pa(rtas_data_buf), - RTAS_DATA_BUF_SIZE); - - if (call_status != 0) { - spin_unlock(&rtas_data_buf_lock); + + if (papr_sysparm_get(PAPR_SYSPARM_COOP_MEM_OVERCOMMIT_ATTRS, &buf)) { pr_debug("CMO not available\n"); pr_debug(" <- fw_cmo_feature_init()\n"); return; } - end = rtas_data_buf + CMO_MAXLENGTH - 2; - ptr = rtas_data_buf + 2; /* step over strlen value */ + end = &buf.val[CMO_MAXLENGTH]; + ptr = &buf.val[0]; key = value = ptr; while (*ptr && (ptr <= end)) { @@ -975,10 +1004,38 @@ static void __init pSeries_cmo_feature_init(void) } else pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, CMO_SecPSP); - spin_unlock(&rtas_data_buf_lock); pr_debug(" <- fw_cmo_feature_init()\n"); } +static void __init pseries_add_hw_description(void) +{ + struct device_node *dn; + const char *s; + + dn = of_find_node_by_path("/openprom"); + if (dn) { + if (of_property_read_string(dn, "model", &s) == 0) + seq_buf_printf(&ppc_hw_desc, "of:%s ", s); + + of_node_put(dn); + } + + dn = of_find_node_by_path("/hypervisor"); + if (dn) { + if (of_property_read_string(dn, 
"compatible", &s) == 0) + seq_buf_printf(&ppc_hw_desc, "hv:%s ", s); + + of_node_put(dn); + return; + } + + dn = of_find_node_by_path("/"); + if (of_property_read_bool(dn, "ibm,powervm-partition") || + of_property_read_bool(dn, "ibm,fw-net-version")) + seq_buf_printf(&ppc_hw_desc, "hv:phyp "); + of_node_put(dn); +} + /* * Early initialization. Relocation is on but do not reference unbolted pages */ @@ -986,6 +1043,8 @@ static void __init pseries_init(void) { pr_debug(" -> pseries_init()\n"); + pseries_add_hw_description(); + #ifdef CONFIG_HVC_CONSOLE if (firmware_has_feature(FW_FEATURE_LPAR)) hvc_vio_init_early(); @@ -1016,14 +1075,14 @@ static void __init pseries_init(void) static void pseries_power_off(void) { int rc; - int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups"); + int rtas_poweroff_ups_token = rtas_function_token(RTAS_FN_IBM_POWER_OFF_UPS); if (rtas_flash_term_hook) rtas_flash_term_hook(SYS_POWER_OFF); if (rtas_poweron_auto == 0 || rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) { - rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1); + rc = rtas_call(rtas_function_token(RTAS_FN_POWER_OFF), 2, 1, NULL, -1, -1); printk(KERN_INFO "RTAS power-off returned %d\n", rc); } else { rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL); @@ -1034,7 +1093,11 @@ static void pseries_power_off(void) static int __init pSeries_probe(void) { - if (!of_node_is_type(of_root, "chrp")) + struct device_node *root = of_find_node_by_path("/"); + bool ret = of_node_is_type(root, "chrp"); + + of_node_put(root); + if (!ret) return 0; /* Cell blades firmware claims to be chrp while it's not. Until this @@ -1061,8 +1124,18 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus) return PCI_PROBE_NORMAL; } +#ifdef CONFIG_MEMORY_HOTPLUG +static unsigned long pseries_memory_block_size(void) +{ + return memory_block_size; +} +#endif + struct pci_controller_ops pseries_pci_controller_ops = { .probe_mode = pSeries_pci_probe_mode, +#ifdef CONFIG_SPAPR_TCE_IOMMU + .device_group = pSeries_pci_device_group, +#endif }; define_machine(pseries) { @@ -1080,14 +1153,12 @@ define_machine(pseries) { .get_boot_time = rtas_get_boot_time, .get_rtc_time = rtas_get_rtc_time, .set_rtc_time = rtas_set_rtc_time, - .calibrate_decr = generic_calibrate_decr, .progress = rtas_progress, .system_reset_exception = pSeries_system_reset_exception, .machine_check_early = pseries_machine_check_realmode, .machine_check_exception = pSeries_machine_check_exception, .machine_check_log_err = pSeries_machine_check_log_err, #ifdef CONFIG_KEXEC_CORE - .machine_kexec = pseries_machine_kexec, .kexec_cpu_down = pseries_kexec_cpu_down, #endif #ifdef CONFIG_MEMORY_HOTPLUG diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index fd2174edfa1d..db99725e752b 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -39,7 +39,7 @@ #include <asm/xive.h> #include <asm/dbell.h> #include <asm/plpar_wrappers.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/svm.h> #include <asm/kvm_guest.h> @@ -55,7 +55,7 @@ static cpumask_var_t of_spin_mask; int smp_query_cpu_stopped(unsigned int pcpu) { int cpu_status, status; - int qcss_tok = rtas_token("query-cpu-stopped-state"); + int qcss_tok = rtas_function_token(RTAS_FN_QUERY_CPU_STOPPED_STATE); if (qcss_tok == RTAS_UNKNOWN_SERVICE) { printk_once(KERN_INFO @@ -108,7 +108,7 @@ static inline int smp_startup_cpu(unsigned int lcpu) * If the RTAS start-cpu token does not exist then presume the * cpu is 
already spinning. */ - start_cpu = rtas_token("start-cpu"); + start_cpu = rtas_function_token(RTAS_FN_START_CPU); if (start_cpu == RTAS_UNKNOWN_SERVICE) return 1; @@ -266,7 +266,7 @@ void __init smp_init_pseries(void) * We know prom_init will not have started them if RTAS supports * query-cpu-stopped-state. */ - if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) { + if (rtas_function_token(RTAS_FN_QUERY_CPU_STOPPED_STATE) == RTAS_UNKNOWN_SERVICE) { if (cpu_has_feature(CPU_FTR_SMT)) { for_each_present_cpu(i) { if (cpu_thread_in_core(i) == 0) @@ -278,11 +278,5 @@ void __init smp_init_pseries(void) cpumask_clear_cpu(boot_cpuid, of_spin_mask); } - /* Non-lpar has additional take/give timebase */ - if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { - smp_ops->give_timebase = rtas_give_timebase; - smp_ops->take_timebase = rtas_take_timebase; - } - pr_debug(" <- smp_init_pSeries()\n"); } diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index 1b902cbf85c5..382003dfdb9a 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c @@ -13,6 +13,7 @@ #include <asm/mmu.h> #include <asm/rtas.h> #include <asm/topology.h> +#include "pseries.h" static struct device suspend_dev; @@ -143,6 +144,7 @@ static const struct platform_suspend_ops pseries_suspend_ops = { **/ static int pseries_suspend_sysfs_register(struct device *dev) { + struct device *dev_root; int rc; if ((rc = subsys_system_register(&suspend_subsys, NULL))) @@ -151,8 +153,13 @@ static int pseries_suspend_sysfs_register(struct device *dev) dev->id = 0; dev->bus = &suspend_subsys; - if ((rc = device_create_file(suspend_subsys.dev_root, &dev_attr_hibernate))) - goto subsys_unregister; + dev_root = bus_get_dev_root(&suspend_subsys); + if (dev_root) { + rc = device_create_file(dev_root, &dev_attr_hibernate); + put_device(dev_root); + if (rc) + goto subsys_unregister; + } return 0; diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c index 3b4045d508ec..384c9dc1899a 100644 --- a/arch/powerpc/platforms/pseries/svm.c +++ b/arch/powerpc/platforms/pseries/svm.c @@ -8,6 +8,7 @@ #include <linux/mm.h> #include <linux/memblock.h> +#include <linux/mem_encrypt.h> #include <linux/cc_platform.h> #include <asm/machdep.h> #include <asm/svm.h> diff --git a/arch/powerpc/platforms/pseries/vas-sysfs.c b/arch/powerpc/platforms/pseries/vas-sysfs.c index f9f682724e77..9e05a0e99cad 100644 --- a/arch/powerpc/platforms/pseries/vas-sysfs.c +++ b/arch/powerpc/platforms/pseries/vas-sysfs.c @@ -162,13 +162,13 @@ static const struct sysfs_ops vas_sysfs_ops = { .store = vas_type_store, }; -static struct kobj_type vas_def_attr_type = { +static const struct kobj_type vas_def_attr_type = { .release = vas_type_release, .sysfs_ops = &vas_sysfs_ops, .default_groups = vas_def_capab_groups, }; -static struct kobj_type vas_qos_attr_type = { +static const struct kobj_type vas_qos_attr_type = { .release = vas_type_release, .sysfs_ops = &vas_sysfs_ops, .default_groups = vas_qos_capab_groups, diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c index 500a1fc4a1d7..c25eb1a38185 100644 --- a/arch/powerpc/platforms/pseries/vas.c +++ b/arch/powerpc/platforms/pseries/vas.c @@ -16,6 +16,8 @@ #include <asm/machdep.h> #include <asm/hvcall.h> #include <asm/plpar_wrappers.h> +#include <asm/firmware.h> +#include <asm/vphn.h> #include <asm/vas.h> #include "vas.h" @@ -36,7 +38,27 @@ static long 
hcall_return_busy_check(long rc) { /* Check if we are stalled for some time */ if (H_IS_LONG_BUSY(rc)) { - msleep(get_longbusy_msecs(rc)); + unsigned int ms; + /* + * Allocate, Modify and Deallocate HCALLs return + * H_LONG_BUSY_ORDER_1_MSEC or H_LONG_BUSY_ORDER_10_MSEC + * for the long delay. So the sleep time should always + * be either 1 or 10 msecs, but if the HCALL + * returns a delay > 10 msecs, clamp the sleep + * time to 10 msecs. + */ + ms = clamp(get_longbusy_msecs(rc), 1, 10); + + /* + * msleep() will often sleep at least 20 msecs even + * though the hypervisor suggests that the OS reissue + * HCALLs after 1 or 10 msecs. Also the delay hint from + * the HCALL is just a suggestion. So it is OK to pause for + * less time than the hinted delay. Use usleep_range() + * to ensure we don't sleep much longer than actually + * needed. + */ + usleep_range(ms * (USEC_PER_MSEC / 10), ms * USEC_PER_MSEC); rc = H_BUSY; } else if (rc == H_BUSY) { cond_resched(); @@ -199,17 +221,42 @@ static irqreturn_t pseries_vas_fault_thread_fn(int irq, void *data) struct vas_user_win_ref *tsk_ref; int rc; - rc = h_get_nx_fault(txwin->vas_win.winid, (u64)virt_to_phys(&crb)); - if (!rc) { - tsk_ref = &txwin->vas_win.task_ref; - vas_dump_crb(&crb); - vas_update_csb(&crb, tsk_ref); + while (atomic_read(&txwin->pending_faults)) { + rc = h_get_nx_fault(txwin->vas_win.winid, (u64)virt_to_phys(&crb)); + if (!rc) { + tsk_ref = &txwin->vas_win.task_ref; + vas_dump_crb(&crb); + vas_update_csb(&crb, tsk_ref); + } + atomic_dec(&txwin->pending_faults); } return IRQ_HANDLED; } /* + * irq_default_primary_handler() can be used only with IRQF_ONESHOT + * which disables IRQ before executing the thread handler and enables + * it after. But disabling the interrupt sets the VAS IRQ OFF + * state in the hypervisor. If the NX generates a fault interrupt + * during this window, the hypervisor will not deliver this + * interrupt to the LPAR. So use a VAS specific IRQ handler instead + * of calling the default primary handler. + */ +static irqreturn_t pseries_vas_irq_handler(int irq, void *data) +{ + struct pseries_vas_window *txwin = data; + + /* + * The thread handler will process this interrupt if it is + * already running. + */ + atomic_inc(&txwin->pending_faults); + + return IRQ_WAKE_THREAD; +} + +/* * Allocate window and setup IRQ mapping. */ static int allocate_setup_window(struct pseries_vas_window *txwin, @@ -239,8 +286,9 @@ static int allocate_setup_window(struct pseries_vas_window *txwin, goto out_irq; } - rc = request_threaded_irq(txwin->fault_virq, NULL, - pseries_vas_fault_thread_fn, IRQF_ONESHOT, + rc = request_threaded_irq(txwin->fault_virq, + pseries_vas_irq_handler, + pseries_vas_fault_thread_fn, 0, txwin->name, txwin); if (rc) { pr_err("VAS-Window[%d]: Request IRQ(%u) failed with %d\n", @@ -313,7 +361,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, if (atomic_inc_return(&cop_feat_caps->nr_used_credits) > atomic_read(&cop_feat_caps->nr_total_credits)) { - pr_err("Credits are not available to allocate window\n"); + pr_err_ratelimited("Credits are not available to allocate window\n"); rc = -EINVAL; goto out; } @@ -332,7 +380,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, * So no unpacking needs to be done. 
*/ rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, domain, - VPHN_FLAG_VCPU, smp_processor_id()); + VPHN_FLAG_VCPU, hard_smp_processor_id()); if (rc != H_SUCCESS) { pr_err("H_HOME_NODE_ASSOCIATIVITY error: %d\n", rc); goto out; @@ -357,11 +405,15 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, * same fault IRQ is not freed by the OS before. */ mutex_lock(&vas_pseries_mutex); - if (migration_in_progress) + if (migration_in_progress) { rc = -EBUSY; - else + } else { rc = allocate_setup_window(txwin, (u64 *)&domain[0], cop_feat_caps->win_type); + if (!rc) + caps->nr_open_wins_progress++; + } + mutex_unlock(&vas_pseries_mutex); if (rc) goto out; @@ -376,8 +428,17 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, goto out_free; txwin->win_type = cop_feat_caps->win_type; - mutex_lock(&vas_pseries_mutex); + /* + * The migration SUSPEND thread sets migration_in_progress and + * closes all open windows from the list. But the window is + * added to the list after the open and modify HCALLs. So it is possible + * that migration_in_progress is set before the modify HCALL, which + * may leave some windows open when the hypervisor + * initiates the migration. + * So check the migration_in_progress flag again and close all + * open windows. + * * Possible to lose the acquired credit with DLPAR core * removal after the window is opened. So if there are any * closed windows (means with lost credits), do not give new @@ -385,9 +446,11 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, * after the existing windows are reopened when credits are * available. */ - if (!caps->nr_close_wins) { + mutex_lock(&vas_pseries_mutex); + if (!caps->nr_close_wins && !migration_in_progress) { list_add(&txwin->win_list, &caps->list); caps->nr_open_windows++; + caps->nr_open_wins_progress--; mutex_unlock(&vas_pseries_mutex); vas_user_win_add_mm_context(&txwin->vas_win.task_ref); return &txwin->vas_win; @@ -396,7 +459,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, put_vas_user_win_ref(&txwin->vas_win.task_ref); rc = -EBUSY; - pr_err("No credit is available to allocate window\n"); + pr_err_ratelimited("No credit is available to allocate window\n"); out_free: /* @@ -405,6 +468,12 @@ out_free: */ free_irq_setup(txwin); h_deallocate_vas_window(txwin->vas_win.winid); + /* + * Hold mutex and reduce nr_open_wins_progress counter. 
+ */ + mutex_lock(&vas_pseries_mutex); + caps->nr_open_wins_progress--; + mutex_unlock(&vas_pseries_mutex); out: atomic_dec(&cop_feat_caps->nr_used_credits); kfree(txwin); @@ -480,8 +549,8 @@ static int vas_deallocate_window(struct vas_window *vwin) vascaps[win->win_type].nr_open_windows--; mutex_unlock(&vas_pseries_mutex); - put_vas_user_win_ref(&vwin->task_ref); mm_context_remove_vas_window(vwin->task_ref.mm); + put_vas_user_win_ref(&vwin->task_ref); kfree(win); return 0; @@ -500,14 +569,10 @@ static const struct vas_user_win_ops vops_pseries = { int vas_register_api_pseries(struct module *mod, enum vas_cop_type cop_type, const char *name) { - int rc; - if (!copypaste_feat) return -ENOTSUPP; - rc = vas_register_coproc_api(mod, cop_type, name, &vops_pseries); - - return rc; + return vas_register_coproc_api(mod, cop_type, name, &vops_pseries); } EXPORT_SYMBOL_GPL(vas_register_api_pseries); @@ -721,6 +786,12 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds, } task_ref = &win->vas_win.task_ref; + /* + * VAS mmap (coproc_mmap()) and its fault handler + * (vas_mmap_fault()) are called after holding mmap lock. + * So hold mmap mutex after mmap_lock to avoid deadlock. + */ + mmap_write_lock(task_ref->mm); mutex_lock(&task_ref->mmap_mutex); vma = task_ref->vma; /* @@ -729,7 +800,6 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds, */ win->vas_win.status |= flag; - mmap_write_lock(task_ref->mm); /* * vma is set in the original mapping. But this mapping * is done with mmap() after the window is opened with ioctl. @@ -737,11 +807,10 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds, * is done before the original mmap() and after the ioctl. */ if (vma) - zap_page_range(vma, vma->vm_start, - vma->vm_end - vma->vm_start); + zap_vma_pages(vma); - mmap_write_unlock(task_ref->mm); mutex_unlock(&task_ref->mmap_mutex); + mmap_write_unlock(task_ref->mm); /* * Close VAS window in the hypervisor, but do not * free vas_window struct since it may be reused @@ -803,7 +872,7 @@ int vas_reconfig_capabilties(u8 type, int new_nr_creds) * The total number of available credits may be decreased or * increased with DLPAR operation. Means some windows have to be * closed / reopened. Hold the vas_pseries_mutex so that the - * the user space can not open new windows. + * user space can not open new windows. */ if (old_nr_creds < new_nr_creds) { /* @@ -829,6 +898,32 @@ int vas_reconfig_capabilties(u8 type, int new_nr_creds) mutex_unlock(&vas_pseries_mutex); return rc; } + +int pseries_vas_dlpar_cpu(void) +{ + int new_nr_creds, rc; + + /* + * NX-GZIP is not enabled. Nothing to do for DLPAR event + */ + if (!copypaste_feat) + return 0; + + + rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, + vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat, + (u64)virt_to_phys(&hv_cop_caps)); + if (!rc) { + new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds); + rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE, new_nr_creds); + } + + if (rc) + pr_err("Failed reconfig VAS capabilities with DLPAR\n"); + + return rc; +} + /* * Total number of default credits available (target_credits) * in LPAR depends on number of cores configured. 
It varies based on @@ -843,7 +938,15 @@ static int pseries_vas_notifier(struct notifier_block *nb, struct of_reconfig_data *rd = data; struct device_node *dn = rd->dn; const __be32 *intserv = NULL; - int new_nr_creds, len, rc = 0; + int len; + + /* + * For shared CPU partition, the hypervisor assigns total credits + * based on entitled core capacity. So updating VAS windows will + * be called from lparcfg_write(). + */ + if (is_shared_processor()) + return NOTIFY_OK; if ((action == OF_RECONFIG_ATTACH_NODE) || (action == OF_RECONFIG_DETACH_NODE)) @@ -855,19 +958,7 @@ static int pseries_vas_notifier(struct notifier_block *nb, if (!intserv) return NOTIFY_OK; - rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, - vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat, - (u64)virt_to_phys(&hv_cop_caps)); - if (!rc) { - new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds); - rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE, - new_nr_creds); - } - - if (rc) - pr_err("Failed reconfig VAS capabilities with DLPAR\n"); - - return rc; + return pseries_vas_dlpar_cpu(); } static struct notifier_block pseries_vas_nb = { @@ -887,14 +978,14 @@ int vas_migration_handler(int action) struct vas_caps *vcaps; int i, rc = 0; + pr_info("VAS migration event %d\n", action); + /* * NX-GZIP is not enabled. Nothing to do for migration. */ if (!copypaste_feat) return rc; - mutex_lock(&vas_pseries_mutex); - if (action == VAS_SUSPEND) migration_in_progress = true; else @@ -940,12 +1031,27 @@ int vas_migration_handler(int action) switch (action) { case VAS_SUSPEND: + mutex_lock(&vas_pseries_mutex); rc = reconfig_close_windows(vcaps, vcaps->nr_open_windows, true); + /* + * Windows are included in the list after successful + * open. So wait for closing these in-progress open + * windows in vas_allocate_window() which will be + * done if the migration_in_progress is set. + */ + while (vcaps->nr_open_wins_progress) { + mutex_unlock(&vas_pseries_mutex); + msleep(10); + mutex_lock(&vas_pseries_mutex); + } + mutex_unlock(&vas_pseries_mutex); break; case VAS_RESUME: + mutex_lock(&vas_pseries_mutex); atomic_set(&caps->nr_total_credits, new_nr_creds); rc = reconfig_open_windows(vcaps, new_nr_creds, true); + mutex_unlock(&vas_pseries_mutex); break; default: /* should not happen */ @@ -961,8 +1067,9 @@ int vas_migration_handler(int action) goto out; } + pr_info("VAS migration event (%d) successful\n", action); + out: - mutex_unlock(&vas_pseries_mutex); return rc; } @@ -975,6 +1082,7 @@ static int __init pseries_vas_init(void) * Linux supports user space COPY/PASTE only with Radix */ if (!radix_enabled()) { + copypaste_feat = false; pr_err("API is supported only with radix page tables\n"); return -ENOTSUPP; } diff --git a/arch/powerpc/platforms/pseries/vas.h b/arch/powerpc/platforms/pseries/vas.h index 333ffa2f9f42..45567cd13178 100644 --- a/arch/powerpc/platforms/pseries/vas.h +++ b/arch/powerpc/platforms/pseries/vas.h @@ -91,6 +91,8 @@ struct vas_cop_feat_caps { struct vas_caps { struct vas_cop_feat_caps caps; struct list_head list; /* List of open windows */ + int nr_open_wins_progress; /* Number of open windows in */ + /* progress. 
Used in migration */ int nr_close_wins; /* closed windows in the hypervisor for DLPAR */ int nr_open_windows; /* Number of successful open windows */ u8 feat; /* Feature type */ @@ -132,6 +134,7 @@ struct pseries_vas_window { u64 flags; char *name; int fault_virq; + atomic_t pending_faults; /* Number of pending faults */ }; int sysfs_add_vas_caps(struct vas_cop_feat_caps *caps); @@ -140,10 +143,15 @@ int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps); #ifdef CONFIG_PPC_VAS int vas_migration_handler(int action); +int pseries_vas_dlpar_cpu(void); #else static inline int vas_migration_handler(int action) { return 0; } +static inline int pseries_vas_dlpar_cpu(void) +{ + return 0; +} #endif #endif /* _VAS_H */ diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 00ecac2c205b..ac1d2d2c9a88 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -611,7 +611,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = { .get_required_mask = dma_iommu_get_required_mask, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; @@ -991,22 +991,10 @@ static DEVICE_ATTR_RO(cmo_allocated); static DEVICE_ATTR_RW(cmo_desired); static DEVICE_ATTR_RW(cmo_allocs_failed); -static struct attribute *vio_cmo_dev_attrs[] = { - &dev_attr_name.attr, - &dev_attr_devspec.attr, - &dev_attr_modalias.attr, - &dev_attr_cmo_entitled.attr, - &dev_attr_cmo_allocated.attr, - &dev_attr_cmo_desired.attr, - &dev_attr_cmo_allocs_failed.attr, - NULL, -}; -ATTRIBUTE_GROUPS(vio_cmo_dev); - /* sysfs bus functions and data structures for CMO */ #define viobus_cmo_rd_attr(name) \ -static ssize_t cmo_bus_##name##_show(struct bus_type *bt, char *buf) \ +static ssize_t cmo_bus_##name##_show(const struct bus_type *bt, char *buf) \ { \ return sprintf(buf, "%lu\n", vio_cmo.name); \ } \ @@ -1015,7 +1003,7 @@ static struct bus_attribute bus_attr_cmo_bus_##name = \ #define viobus_cmo_pool_rd_attr(name, var) \ static ssize_t \ -cmo_##name##_##var##_show(struct bus_type *bt, char *buf) \ +cmo_##name##_##var##_show(const struct bus_type *bt, char *buf) \ { \ return sprintf(buf, "%lu\n", vio_cmo.name.var); \ } \ @@ -1030,12 +1018,12 @@ viobus_cmo_pool_rd_attr(reserve, size); viobus_cmo_pool_rd_attr(excess, size); viobus_cmo_pool_rd_attr(excess, free); -static ssize_t cmo_high_show(struct bus_type *bt, char *buf) +static ssize_t cmo_high_show(const struct bus_type *bt, char *buf) { return sprintf(buf, "%lu\n", vio_cmo.high); } -static ssize_t cmo_high_store(struct bus_type *bt, const char *buf, +static ssize_t cmo_high_store(const struct bus_type *bt, const char *buf, size_t count) { unsigned long flags; @@ -1062,11 +1050,7 @@ static struct attribute *vio_bus_attrs[] = { }; ATTRIBUTE_GROUPS(vio_bus); -static void __init vio_cmo_sysfs_init(void) -{ - vio_bus_type.dev_groups = vio_cmo_dev_groups; - vio_bus_type.bus_groups = vio_bus_groups; -} +static void __init vio_cmo_sysfs_init(void) { } #else /* CONFIG_PPC_SMLPAR */ int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; } void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {} @@ -1381,7 +1365,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) } if (family == PFO) { - if (of_get_property(of_node, "interrupt-controller", NULL)) { + if (of_property_read_bool(of_node, "interrupt-controller")) { pr_debug("%s: Skipping the 
interrupt controller %pOFn.\n", __func__, of_node); return NULL; @@ -1440,7 +1424,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) viodev->dev.bus = &vio_bus_type; viodev->dev.release = vio_dev_release; - if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) { + if (of_property_present(viodev->dev.of_node, "ibm,my-dma-window")) { if (firmware_has_feature(FW_FEATURE_CMO)) vio_cmo_set_dma_ops(viodev); else @@ -1584,14 +1568,6 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(modalias); -static struct attribute *vio_dev_attrs[] = { - &dev_attr_name.attr, - &dev_attr_devspec.attr, - &dev_attr_modalias.attr, - NULL, -}; -ATTRIBUTE_GROUPS(vio_dev); - void vio_unregister_device(struct vio_dev *viodev) { device_unregister(&viodev->dev); @@ -1600,33 +1576,61 @@ void vio_unregister_device(struct vio_dev *viodev) } EXPORT_SYMBOL(vio_unregister_device); -static int vio_bus_match(struct device *dev, struct device_driver *drv) +static int vio_bus_match(struct device *dev, const struct device_driver *drv) { const struct vio_dev *vio_dev = to_vio_dev(dev); - struct vio_driver *vio_drv = to_vio_driver(drv); + const struct vio_driver *vio_drv = to_vio_driver(drv); const struct vio_device_id *ids = vio_drv->id_table; return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL); } -static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env) +static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env) { const struct vio_dev *vio_dev = to_vio_dev(dev); - struct device_node *dn; + const struct device_node *dn; const char *cp; dn = dev->of_node; - if (!dn) - return -ENODEV; - cp = of_get_property(dn, "compatible", NULL); - if (!cp) - return -ENODEV; + if (dn && (cp = of_get_property(dn, "compatible", NULL))) + add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp); - add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp); return 0; } -struct bus_type vio_bus_type = { +#ifdef CONFIG_PPC_SMLPAR +static struct attribute *vio_cmo_dev_attrs[] = { + &dev_attr_name.attr, + &dev_attr_devspec.attr, + &dev_attr_modalias.attr, + &dev_attr_cmo_entitled.attr, + &dev_attr_cmo_allocated.attr, + &dev_attr_cmo_desired.attr, + &dev_attr_cmo_allocs_failed.attr, + NULL, +}; +ATTRIBUTE_GROUPS(vio_cmo_dev); + +const struct bus_type vio_bus_type = { + .name = "vio", + .dev_groups = vio_cmo_dev_groups, + .bus_groups = vio_bus_groups, + .uevent = vio_hotplug, + .match = vio_bus_match, + .probe = vio_bus_probe, + .remove = vio_bus_remove, + .shutdown = vio_bus_shutdown, +}; +#else /* CONFIG_PPC_SMLPAR */ +static struct attribute *vio_dev_attrs[] = { + &dev_attr_name.attr, + &dev_attr_devspec.attr, + &dev_attr_modalias.attr, + NULL, +}; +ATTRIBUTE_GROUPS(vio_dev); + +const struct bus_type vio_bus_type = { .name = "vio", .dev_groups = vio_dev_groups, .uevent = vio_hotplug, @@ -1635,6 +1639,7 @@ struct bus_type vio_bus_type = { .remove = vio_bus_remove, .shutdown = vio_bus_shutdown, }; +#endif /* CONFIG_PPC_SMLPAR */ /** * vio_get_attribute: - get attribute for virtual device @@ -1684,7 +1689,7 @@ struct vio_dev *vio_find_node(struct device_node *vnode) /* construct the kobject name from the device node */ if (of_node_is_type(vnode_parent, "vdevice")) { const __be32 *prop; - + prop = of_get_property(vnode, "reg", NULL); if (!prop) goto out; diff --git a/arch/powerpc/platforms/pseries/vphn.c b/arch/powerpc/platforms/pseries/vphn.c index cca474a2c396..3f85ece3c872 100644 --- 
a/arch/powerpc/platforms/pseries/vphn.c +++ b/arch/powerpc/platforms/pseries/vphn.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <asm/byteorder.h> -#include <asm/lppaca.h> +#include <asm/vphn.h> /* * The associativity domain numbers are returned from the hypervisor as a |