Diffstat (limited to 'arch/powerpc/platforms')
169 files changed, 4186 insertions, 8634 deletions
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
deleted file mode 100644
index b3c466c50535..000000000000
--- a/arch/powerpc/platforms/40x/Kconfig
+++ /dev/null
@@ -1,78 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config ACADIA
-	bool "Acadia"
-	depends on 40x
-	select PPC40x_SIMPLE
-	select 405EZ
-	help
-	  This option enables support for the AMCC 405EZ Acadia evaluation board.
-
-config HOTFOOT
-	bool "Hotfoot"
-	depends on 40x
-	select PPC40x_SIMPLE
-	select FORCE_PCI
-	help
-	  This option enables support for the ESTEEM 195E Hotfoot board.
-
-config KILAUEA
-	bool "Kilauea"
-	depends on 40x
-	select 405EX
-	select PPC40x_SIMPLE
-	select PPC4xx_PCI_EXPRESS
-	select FORCE_PCI
-	select PCI_MSI
-	help
-	  This option enables support for the AMCC PPC405EX evaluation board.
-
-config MAKALU
-	bool "Makalu"
-	depends on 40x
-	select 405EX
-	select FORCE_PCI
-	select PPC4xx_PCI_EXPRESS
-	select PPC40x_SIMPLE
-	help
-	  This option enables support for the AMCC PPC405EX board.
-
-config OBS600
-	bool "OpenBlockS 600"
-	depends on 40x
-	select 405EX
-	select PPC40x_SIMPLE
-	help
-	  This option enables support for PlatHome OpenBlockS 600 server
-
-config PPC40x_SIMPLE
-	bool "Simple PowerPC 40x board support"
-	depends on 40x
-	help
-	  This option enables the simple PowerPC 40x platform support.
-
-config 405EX
-	bool
-	select IBM_EMAC_EMAC4 if IBM_EMAC
-	select IBM_EMAC_RGMII if IBM_EMAC
-
-config 405EZ
-	bool
-	select IBM_EMAC_NO_FLOW_CTRL if IBM_EMAC
-	select IBM_EMAC_MAL_CLR_ICINTSTAT if IBM_EMAC
-	select IBM_EMAC_MAL_COMMON_ERR if IBM_EMAC
-
-config PPC4xx_GPIO
-	bool "PPC4xx GPIO support"
-	depends on 40x
-	select GPIOLIB
-	select OF_GPIO_MM_GPIOCHIP
-	help
-	  Enable gpiolib support for ppc40x based boards
-
-config APM8018X
-	bool "APM8018X"
-	depends on 40x
-	select PPC40x_SIMPLE
-	help
-	  This option enables support for the AppliedMicro APM8018X evaluation
-	  board.
diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile
deleted file mode 100644
index 122de98527c4..000000000000
--- a/arch/powerpc/platforms/40x/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_PPC40x_SIMPLE) += ppc40x_simple.o
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
deleted file mode 100644
index 294ab2728588..000000000000
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Generic PowerPC 40x platform support
- *
- * Copyright 2008 IBM Corporation
- *
- * This implements simple platform support for PowerPC 44x chips. This is
- * mostly used for eval boards or other simple and "generic" 44x boards. If
- * your board has custom functions or hardware, then you will likely want to
- * implement your own board.c file to accommodate it.
- */
-
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc4xx.h>
-#include <asm/time.h>
-#include <asm/udbg.h>
-#include <asm/uic.h>
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-
-static const struct of_device_id ppc40x_of_bus[] __initconst = {
-	{ .compatible = "ibm,plb3", },
-	{ .compatible = "ibm,plb4", },
-	{ .compatible = "ibm,opb", },
-	{ .compatible = "ibm,ebc", },
-	{ .compatible = "simple-bus", },
-	{},
-};
-
-static int __init ppc40x_device_probe(void)
-{
-	of_platform_bus_probe(NULL, ppc40x_of_bus, NULL);
-
-	return 0;
-}
-machine_device_initcall(ppc40x_simple, ppc40x_device_probe);
-
-/* This is the list of boards that can be supported by this simple
- * platform code. This does _not_ mean the boards are compatible,
- * as they most certainly are not from a device tree perspective.
- * However, their differences are handled by the device tree and the
- * drivers and therefore they don't need custom board support files.
- *
- * Again, if your board needs to do things differently then create a
- * board.c file for it rather than adding it to this list.
- */
-static const char * const board[] __initconst = {
-	"amcc,acadia",
-	"amcc,haleakala",
-	"amcc,kilauea",
-	"amcc,makalu",
-	"apm,klondike",
-	"est,hotfoot",
-	"plathome,obs600",
-	NULL
-};
-
-static int __init ppc40x_probe(void)
-{
-	pci_set_flags(PCI_REASSIGN_ALL_RSRC);
-	return 1;
-}
-
-define_machine(ppc40x_simple) {
-	.name = "PowerPC 40x Platform",
-	.compatibles = board,
-	.probe = ppc40x_probe,
-	.progress = udbg_progress,
-	.init_IRQ = uic_init_tree,
-	.get_irq = uic_get_irq,
-	.restart = ppc4xx_reset_system,
-};
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 5ba031f57652..ca7b1bb442d9 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-y	+= misc_44x.o machine_check.o
+obj-y	+= misc_44x.o machine_check.o uic.o soc.o
 ifneq ($(CONFIG_PPC4xx_CPM),y)
 obj-y	+= idle.o
 endif
@@ -12,3 +12,7 @@ obj-$(CONFIG_CANYONLANDS)+= canyonlands.o
 obj-$(CONFIG_CURRITUCK)	+= ppc476.o
 obj-$(CONFIG_AKEBONO)	+= ppc476.o
 obj-$(CONFIG_FSP2)	+= fsp2.o
+obj-$(CONFIG_PCI)	+= pci.o
+obj-$(CONFIG_PPC4xx_HSTA_MSI) += hsta_msi.o
+obj-$(CONFIG_PPC4xx_CPM) += cpm.o
+obj-$(CONFIG_PPC4xx_GPIO) += gpio.o
diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/44x/cpm.c
index 670f8ad4465b..670f8ad4465b 100644
--- a/arch/powerpc/platforms/4xx/cpm.c
+++ b/arch/powerpc/platforms/44x/cpm.c
diff --git a/arch/powerpc/platforms/4xx/gpio.c b/arch/powerpc/platforms/44x/gpio.c
index e5f2319e5cbe..d540e261d85a 100644
--- a/arch/powerpc/platforms/4xx/gpio.c
+++ b/arch/powerpc/platforms/44x/gpio.c
@@ -75,8 +75,7 @@ __ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 		clrbits32(&regs->or, GPIO_MASK(gpio));
 }
 
-static void
-ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc);
 	unsigned long flags;
@@ -88,6 +87,8 @@ ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 	spin_unlock_irqrestore(&chip->lock, flags);
 
 	pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+	return 0;
 }
 
 static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
@@ -179,7 +180,7 @@ static int __init ppc4xx_add_gpiochips(void)
 		gc->direction_input = ppc4xx_gpio_dir_in;
 		gc->direction_output = ppc4xx_gpio_dir_out;
 		gc->get = ppc4xx_gpio_get;
-		gc->set = ppc4xx_gpio_set;
+		gc->set_rv = ppc4xx_gpio_set;
 
 		ret = of_mm_gpiochip_add_data(np, mm_gc, ppc4xx_gc);
 		if (ret)
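The set -> set_rv change above follows the gpiolib move to int-returning line setters, so a driver can report an I/O error instead of dropping it. A minimal sketch of the convention on a hypothetical expander driver (names demo_* are illustrative, not from this diff):

#include <linux/gpio/driver.h>
#include <linux/i2c.h>

struct demo_chip {
	struct i2c_client *client;	/* hypothetical I2C GPIO expander */
	u8 shadow;			/* cached output register */
};

/* New-style setter: returns 0 or a negative errno, which gpiolib
 * can propagate to the consumer instead of silently ignoring it. */
static int demo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct demo_chip *chip = gpiochip_get_data(gc);

	if (value)
		chip->shadow |= BIT(offset);
	else
		chip->shadow &= ~BIT(offset);

	/* The bus write can fail; the error now reaches the caller. */
	return i2c_smbus_write_byte(chip->client, chip->shadow);
}

static void demo_wire_up(struct gpio_chip *gc)
{
	gc->set_rv = demo_gpio_set;	/* was: gc->set (void return) */
}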
diff --git a/arch/powerpc/platforms/4xx/hsta_msi.c b/arch/powerpc/platforms/44x/hsta_msi.c
index c6bd846b0d65..c6bd846b0d65 100644
--- a/arch/powerpc/platforms/4xx/hsta_msi.c
+++ b/arch/powerpc/platforms/44x/hsta_msi.c
diff --git a/arch/powerpc/platforms/44x/machine_check.c b/arch/powerpc/platforms/44x/machine_check.c
index 5d19daacd78a..85ff33a8d9b6 100644
--- a/arch/powerpc/platforms/44x/machine_check.c
+++ b/arch/powerpc/platforms/44x/machine_check.c
@@ -9,6 +9,21 @@
 #include <asm/reg.h>
 #include <asm/cacheflush.h>
 
+int machine_check_4xx(struct pt_regs *regs)
+{
+	unsigned long reason = regs->esr;
+
+	if (reason & ESR_IMCP) {
+		printk("Instruction");
+		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+	} else
+		printk("Data");
+
+	printk(" machine check in kernel mode.\n");
+
+	return 0;
+}
+
 int machine_check_440A(struct pt_regs *regs)
 {
 	unsigned long reason = regs->esr;
diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/44x/pci.c
index 48626615b18b..364aeb86ab64 100644
--- a/arch/powerpc/platforms/4xx/pci.c
+++ b/arch/powerpc/platforms/44x/pci.c
@@ -94,10 +94,8 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
 					  struct resource *res)
 {
 	u64 size;
-	const u32 *ranges;
-	int rlen;
-	int pna = of_n_addr_cells(hose->dn);
-	int np = pna + 5;
+	struct of_range_parser parser;
+	struct of_range range;
 
 	/* Default */
 	res->start = 0;
@@ -105,18 +103,15 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
 	res->end = size - 1;
 	res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
 
-	/* Get dma-ranges property */
-	ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
-	if (ranges == NULL)
+	if (of_pci_dma_range_parser_init(&parser, hose->dn))
 		goto out;
 
-	/* Walk it */
-	while ((rlen -= np * 4) >= 0) {
-		u32 pci_space = ranges[0];
-		u64 pci_addr = of_read_number(ranges + 1, 2);
-		u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
-		size = of_read_number(ranges + pna + 3, 2);
-		ranges += np;
+	for_each_of_range(&parser, &range) {
+		u32 pci_space = range.flags;
+		u64 pci_addr = range.bus_addr;
+		u64 cpu_addr = range.cpu_addr;
+		size = range.size;
 
 		if (cpu_addr == OF_BAD_ADDR || size == 0)
 			continue;
@@ -1263,102 +1258,6 @@ static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
 
 #endif /* CONFIG_44x */
 
-#ifdef CONFIG_40x
-
-static int __init ppc405ex_pciex_core_init(struct device_node *np)
-{
-	/* Nothing to do, return 2 ports */
-	return 2;
-}
-
-static void __init ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
-{
-	/* Assert the PE0_PHY reset */
-	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
-	msleep(1);
-
-	/* deassert the PE0_hotreset */
-	if (port->endpoint)
-		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
-	else
-		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);
-
-	/* poll for phy !reset */
-	/* XXX FIXME add timeout */
-	while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
-		;
-
-	/* deassert the PE0_gpl_utl_reset */
-	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
-}
-
-static int __init ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
-{
-	u32 val;
-
-	if (port->endpoint)
-		val = PTYPE_LEGACY_ENDPOINT;
-	else
-		val = PTYPE_ROOT_PORT;
-
-	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
-	       1 << 24 | val << 20 | LNKW_X1 << 12);
-
-	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
-	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
-	mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
-	mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);
-
-	/*
-	 * Only reset the PHY when no link is currently established.
-	 * This is for the Atheros PCIe board which has problems to establish
-	 * the link (again) after this PHY reset. All other currently tested
-	 * PCIe boards don't show this problem.
-	 * This has to be re-tested and fixed in a later release!
-	 */
-	val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
-	if (!(val & 0x00001000))
-		ppc405ex_pcie_phy_reset(port);
-
-	dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000);  /* guarded on */
-
-	port->has_ibpre = 1;
-
-	return ppc4xx_pciex_port_reset_sdr(port);
-}
-
-static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
-{
-	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
-
-	/*
-	 * Set buffer allocations and then assert VRB and TXE.
-	 */
-	out_be32(port->utl_base + PEUTL_OUTTR,   0x02000000);
-	out_be32(port->utl_base + PEUTL_INTR,    0x02000000);
-	out_be32(port->utl_base + PEUTL_OPDBSZ,  0x04000000);
-	out_be32(port->utl_base + PEUTL_PBBSZ,   0x21000000);
-	out_be32(port->utl_base + PEUTL_IPHBSZ,  0x02000000);
-	out_be32(port->utl_base + PEUTL_IPDBSZ,  0x04000000);
-	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
-	out_be32(port->utl_base + PEUTL_PCTL,    0x80800066);
-
-	out_be32(port->utl_base + PEUTL_PBCTL,   0x08000000);
-
-	return 0;
-}
-
-static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
-{
-	.want_sdr	= true,
-	.core_init	= ppc405ex_pciex_core_init,
-	.port_init_hw	= ppc405ex_pciex_init_port_hw,
-	.setup_utl	= ppc405ex_pciex_init_utl,
-	.check_link	= ppc4xx_pciex_check_link_sdr,
-};
-
-#endif /* CONFIG_40x */
-
 #ifdef CONFIG_476FPE
 static int __init ppc_476fpe_pciex_core_init(struct device_node *np)
 {
@@ -1427,10 +1326,6 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
 	if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx"))
 		ppc4xx_pciex_hwops = &apm821xx_pcie_hwops;
 #endif /* CONFIG_44x */
-#ifdef CONFIG_40x
-	if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
-		ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
-#endif
 #ifdef CONFIG_476FPE
 	if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe") ||
 	    of_device_is_compatible(np, "ibm,plb-pciex-476gtr"))
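The dma-ranges rework above replaces an open-coded walk of the raw property with the generic OF range parser, which handles the address-cell arithmetic itself. A hedged sketch of the same pattern on a hypothetical node (demo_* names are illustrative):

#include <linux/of_address.h>

/* Hypothetical helper: print each dma-ranges window below @np. */
static void demo_dump_dma_ranges(struct device_node *np)
{
	struct of_range_parser parser;
	struct of_range range;

	if (of_pci_dma_range_parser_init(&parser, np))
		return;		/* no usable dma-ranges property */

	/* The parser yields decoded bus/cpu addresses and sizes,
	 * so no manual of_read_number()/of_translate_dma_address(). */
	for_each_of_range(&parser, &range)
		pr_info("bus %llx -> cpu %llx, size %llx\n",
			(unsigned long long)range.bus_addr,
			(unsigned long long)range.cpu_addr,
			(unsigned long long)range.size);
}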
diff --git a/arch/powerpc/platforms/4xx/pci.h b/arch/powerpc/platforms/44x/pci.h
index bb4821938ab1..bb4821938ab1 100644
--- a/arch/powerpc/platforms/4xx/pci.h
+++ b/arch/powerpc/platforms/44x/pci.h
diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c
index 164cbcd4588e..e7b7bdaad341 100644
--- a/arch/powerpc/platforms/44x/ppc476.c
+++ b/arch/powerpc/platforms/44x/ppc476.c
@@ -95,7 +95,7 @@ static int avr_probe(struct i2c_client *client)
 }
 
 static const struct i2c_device_id avr_id[] = {
-	{ "akebono-avr", 0 },
+	{ "akebono-avr" },
 	{ }
 };
diff --git a/arch/powerpc/platforms/4xx/soc.c b/arch/powerpc/platforms/44x/soc.c
index 5412e6b21e10..5412e6b21e10 100644
--- a/arch/powerpc/platforms/4xx/soc.c
+++ b/arch/powerpc/platforms/44x/soc.c
diff --git a/arch/powerpc/platforms/4xx/uic.c b/arch/powerpc/platforms/44x/uic.c
index e3e148b9dd18..85daf841fd3f 100644
--- a/arch/powerpc/platforms/4xx/uic.c
+++ b/arch/powerpc/platforms/44x/uic.c
@@ -37,7 +37,7 @@
 #define UIC_VR 0x7
 #define UIC_VCR 0x8
 
-struct uic *primary_uic;
+static struct uic *primary_uic;
 
 struct uic {
 	int index;
@@ -254,8 +254,9 @@ static struct uic * __init uic_init_one(struct device_node *node)
 	}
 	uic->dcrbase = *dcrreg;
 
-	uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops,
-					     uic);
+	uic->irqhost = irq_domain_create_linear(of_fwnode_handle(node),
+						NR_UIC_INTS, &uic_host_ops,
+						uic);
 	if (! uic->irqhost)
 		return NULL; /* FIXME: panic? */
@@ -291,7 +292,7 @@ void __init uic_init_tree(void)
 	if (!primary_uic)
 		panic("Unable to initialize primary UIC %pOF\n", np);
 
-	irq_set_default_host(primary_uic->irqhost);
+	irq_set_default_domain(primary_uic->irqhost);
 	of_node_put(np);
 
 	/* The scan again for cascaded UICs */
@@ -327,5 +328,5 @@ unsigned int uic_get_irq(void)
 	msr = mfdcr(primary_uic->dcrbase + UIC_MSR);
 	src = 32 - ffs(msr);
 
-	return irq_linear_revmap(primary_uic->irqhost, src);
+	return irq_find_mapping(primary_uic->irqhost, src);
 }
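The uic.c hunks track two tree-wide irqdomain API moves that recur throughout this series: domain creation now takes a fwnode handle (irq_domain_create_linear() with of_fwnode_handle()) instead of a device_node, and reverse lookup uses irq_find_mapping() in place of irq_linear_revmap(). A sketch of the conversion on a hypothetical PIC driver (demo_* names are illustrative):

#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *demo_host;	/* hypothetical PIC driver state */

static int demo_pic_init(struct device_node *np,
			 const struct irq_domain_ops *ops)
{
	/* Old: demo_host = irq_domain_add_linear(np, 32, ops, NULL); */
	demo_host = irq_domain_create_linear(of_fwnode_handle(np), 32,
					     ops, NULL);
	return demo_host ? 0 : -ENOMEM;
}

static unsigned int demo_pic_get_irq(irq_hw_number_t hwirq)
{
	/* Old: return irq_linear_revmap(demo_host, hwirq);
	 * irq_find_mapping() covers linear domains as well. */
	return irq_find_mapping(demo_host, hwirq);
}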
diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile
deleted file mode 100644
index 2071a0abe09b..000000000000
--- a/arch/powerpc/platforms/4xx/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-y	+= uic.o machine_check.o
-obj-$(CONFIG_4xx_SOC)	+= soc.o
-obj-$(CONFIG_PCI)	+= pci.o
-obj-$(CONFIG_PPC4xx_HSTA_MSI) += hsta_msi.o
-obj-$(CONFIG_PPC4xx_CPM) += cpm.o
-obj-$(CONFIG_PPC4xx_GPIO) += gpio.o
diff --git a/arch/powerpc/platforms/4xx/machine_check.c b/arch/powerpc/platforms/4xx/machine_check.c
deleted file mode 100644
index a905da1d6f41..000000000000
--- a/arch/powerpc/platforms/4xx/machine_check.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- */
-
-#include <linux/kernel.h>
-#include <linux/printk.h>
-#include <linux/ptrace.h>
-
-#include <asm/reg.h>
-
-int machine_check_4xx(struct pt_regs *regs)
-{
-	unsigned long reason = regs->esr;
-
-	if (reason & ESR_IMCP) {
-		printk("Instruction");
-		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
-	} else
-		printk("Data");
-	printk(" machine check in kernel mode.\n");
-
-	return 0;
-}
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index e995eb30bf09..2cf3c6237337 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -188,7 +188,8 @@ mpc5121_ads_cpld_pic_init(void)
 
 	cpld_pic_node = of_node_get(np);
 
-	cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL);
+	cpld_pic_host = irq_domain_create_linear(of_fwnode_handle(np), 16,
+						 &cpld_pic_host_ops, NULL);
 	if (!cpld_pic_host) {
 		printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n");
 		goto end;
diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
index 4a25b6b48615..9668b052cd4b 100644
--- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
+++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
@@ -504,7 +504,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match);
 
 static struct platform_driver mpc512x_lpbfifo_driver = {
 	.probe = mpc512x_lpbfifo_probe,
-	.remove_new = mpc512x_lpbfifo_remove,
+	.remove = mpc512x_lpbfifo_remove,
 	.driver = {
 		.name = DRV_NAME,
 		.of_match_table = mpc512x_lpbfifo_match,
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index 8f75e9574c27..8c1f3b629fc7 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -279,7 +279,7 @@ static void __init mpc512x_setup_diu(void)
 	 * and so negatively affect boot time. Instead we reserve the
 	 * already configured frame buffer area so that it won't be
	 * destroyed. The starting address of the area to reserve and
-	 * also it's length is passed to memblock_reserve(). It will be
+	 * also its length is passed to memblock_reserve(). It will be
 	 * freed later on first open of fbdev, when splash image is not
 	 * needed any more.
 	 */
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 37a67120f257..a7172f9ebaad 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -13,6 +13,7 @@
 #include <generated/utsrelease.h>
 #include <linux/pci.h>
 #include <linux/of.h>
+#include <linux/seq_file.h>
 #include <asm/dma.h>
 #include <asm/time.h>
 #include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
index 0b12647e7b42..0ec2522ee4ad 100644
--- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
+++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
@@ -203,7 +203,8 @@ lite5200_wakeup:
 
 	/* HIDs, MSR */
 	LOAD_SPRN(HID1, 0x19)
-	LOAD_SPRN(HID2, 0x1a)
+	/* FIXME: Should this use HID2_G2_LE? */
+	LOAD_SPRN(HID2_750FX, 0x1a)
 
 
 	/* address translation is tricky (see turn_on_mmu) */
@@ -283,7 +284,8 @@ SYM_FUNC_START_LOCAL(save_regs)
 	SAVE_SPRN(HID0, 0x18)
 
 	SAVE_SPRN(HID1, 0x19)
-	SAVE_SPRN(HID2, 0x1a)
+	/* FIXME: Should this use HID2_G2_LE? */
+	SAVE_SPRN(HID2_750FX, 0x1a)
 	mfmsr	r10
 	stw	r10, (4*0x1b)(r4)
 	/*SAVE_SPRN(LR, 0x1c) have to save it before the call */
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 19626cd42406..bc7f83cfec1d 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -168,7 +168,7 @@ static void __init media5200_init_irq(void)
 
 	spin_lock_init(&media5200_irq.lock);
 
-	media5200_irq.irqhost = irq_domain_add_linear(fpga_np,
+	media5200_irq.irqhost = irq_domain_create_linear(of_fwnode_handle(fpga_np),
 			MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq);
 	if (!media5200_irq.irqhost)
 		goto out;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index b4938e344f71..253421ffb4e5 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -12,12 +12,10 @@
 
 #undef DEBUG
 
-#include <linux/gpio.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
-#include <linux/of_gpio.h>
 #include <linux/export.h>
 #include <asm/io.h>
 #include <asm/mpc52xx.h>
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 581059527c36..bda707d848a6 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -48,6 +48,7 @@
  * the output mode. This driver does not change the output mode setting.
  */
 
+#include <linux/gpio/driver.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -56,7 +57,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/kernel.h>
 #include <linux/property.h>
@@ -247,9 +247,9 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
 	if (!cascade_virq)
 		return;
 
-	gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt);
+	gpt->irqhost = irq_domain_create_linear(of_fwnode_handle(node), 1, &mpc52xx_gpt_irq_ops, gpt);
 	if (!gpt->irqhost) {
-		dev_err(gpt->dev, "irq_domain_add_linear() failed\n");
+		dev_err(gpt->dev, "irq_domain_create_linear() failed\n");
 		return;
 	}
@@ -280,7 +280,7 @@ static int mpc52xx_gpt_gpio_get(struct gpio_chip *gc, unsigned int gpio)
 	return (in_be32(&gpt->regs->status) >> 8) & 1;
 }
 
-static void
+static int
 mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int v)
 {
 	struct mpc52xx_gpt_priv *gpt = gpiochip_get_data(gc);
@@ -293,6 +293,8 @@ mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int v)
 	raw_spin_lock_irqsave(&gpt->lock, flags);
 	clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_GPIO_MASK, r);
 	raw_spin_unlock_irqrestore(&gpt->lock, flags);
+
+	return 0;
 }
 
 static int mpc52xx_gpt_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
@@ -334,7 +336,7 @@ static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt)
 	gpt->gc.direction_input  = mpc52xx_gpt_gpio_dir_in;
 	gpt->gc.direction_output = mpc52xx_gpt_gpio_dir_out;
 	gpt->gc.get = mpc52xx_gpt_gpio_get;
-	gpt->gc.set = mpc52xx_gpt_gpio_set;
+	gpt->gc.set_rv = mpc52xx_gpt_gpio_set;
 	gpt->gc.base = -1;
 	gpt->gc.parent = gpt->dev;
@@ -369,7 +371,7 @@ struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq)
 	mutex_lock(&mpc52xx_gpt_list_mutex);
 	list_for_each(pos, &mpc52xx_gpt_list) {
 		gpt = container_of(pos, struct mpc52xx_gpt_priv, list);
-		if (gpt->irqhost && irq == irq_linear_revmap(gpt->irqhost, 0)) {
+		if (gpt->irqhost && irq == irq_find_mapping(gpt->irqhost, 0)) {
 			mutex_unlock(&mpc52xx_gpt_list_mutex);
 			return gpt;
 		}
@@ -644,7 +646,6 @@ static int mpc52xx_wdt_release(struct inode *inode, struct file *file)
 
 static const struct file_operations mpc52xx_wdt_fops = {
 	.owner	= THIS_MODULE,
-	.llseek	= no_llseek,
 	.write	= mpc52xx_wdt_write,
 	.unlocked_ioctl = mpc52xx_wdt_ioctl,
 	.compat_ioctl = compat_ptr_ioctl,
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 1e0a5e9644dc..eb6a4e745c08 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -446,14 +446,14 @@ void __init mpc52xx_init_irq(void)
 	 * As last step, add an irq host to translate the real
 	 * hw irq information provided by the ofw to linux virq
 	 */
-	mpc52xx_irqhost = irq_domain_add_linear(picnode,
+	mpc52xx_irqhost = irq_domain_create_linear(of_fwnode_handle(picnode),
 					MPC52xx_IRQ_HIGHTESTHWIRQ,
 					&mpc52xx_irqhost_ops, NULL);
 
 	if (!mpc52xx_irqhost)
 		panic(__FILE__ ": Cannot allocate the IRQ host\n");
 
-	irq_set_default_host(mpc52xx_irqhost);
+	irq_set_default_domain(mpc52xx_irqhost);
 
 	pr_info("MPC52xx PIC is up and running!\n");
 }
@@ -515,5 +515,5 @@ unsigned int mpc52xx_get_irq(void)
 		return 0;
 	}
 
-	return irq_linear_revmap(mpc52xx_irqhost, irq);
+	return irq_find_mapping(mpc52xx_irqhost, irq);
 }
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index 3dc65ce1f175..8f918916e631 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -128,7 +128,7 @@ static int ep8248e_mdio_probe(struct platform_device *ofdev)
 	bus->name = "ep8248e-mdio-bitbang";
 	bus->parent = &ofdev->dev;
-	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start);
 
 	ret = of_mdiobus_register(bus, ofdev->dev.of_node);
 	if (ret)
diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c
index c86da3f2b74b..99f0f0f41876 100644
--- a/arch/powerpc/platforms/82xx/km82xx.c
+++ b/arch/powerpc/platforms/82xx/km82xx.c
@@ -27,15 +27,15 @@
 
 static void __init km82xx_pic_init(void)
 {
-	struct device_node *np = of_find_compatible_node(NULL, NULL,
-							"fsl,pq2-pic");
+	struct device_node *np __free(device_node);
+
+	np = of_find_compatible_node(NULL, NULL, "fsl,pq2-pic");
+
 	if (!np) {
 		pr_err("PIC init: can not find cpm-pic node\n");
 		return;
 	}
 
 	cpm2_pic_init(np);
-	of_node_put(np);
 }
 
 struct cpm_pin {
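The km82xx.c change drops the explicit of_node_put() in favour of scope-based cleanup from <linux/cleanup.h>: a device_node pointer annotated with __free(device_node) is put automatically when it goes out of scope, on every return path. A sketch of the idiom on a hypothetical lookup (demo_* is illustrative; the declare-and-initialise form shown here is the commonly recommended style):

#include <linux/of.h>	/* pulls in the device_node cleanup class */

static int demo_find_pic(void)
{
	struct device_node *np __free(device_node) =
		of_find_compatible_node(NULL, NULL, "fsl,pq2-pic");

	if (!np)
		return -ENODEV;	/* no of_node_put() needed on this path */

	/* ... use np ... */

	return 0;	/* reference dropped automatically here too */
}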
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 4d8fa9ed1a67..6e37dfc6c5c9 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -92,10 +92,11 @@ static void mcu_power_off(void)
 	mutex_unlock(&mcu->lock);
 }
 
-static void mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct mcu *mcu = gpiochip_get_data(gc);
 	u8 bit = 1 << (4 + gpio);
+	int ret;
 
 	mutex_lock(&mcu->lock);
 	if (val)
@@ -103,14 +104,16 @@ static void mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 	else
 		mcu->reg_ctrl |= bit;
 
-	i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL, mcu->reg_ctrl);
+	ret = i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL,
+					mcu->reg_ctrl);
 	mutex_unlock(&mcu->lock);
+
+	return ret;
 }
 
 static int mcu_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 {
-	mcu_gpio_set(gc, gpio, val);
-	return 0;
+	return mcu_gpio_set(gc, gpio, val);
 }
 
 static int mcu_gpiochip_add(struct mcu *mcu)
@@ -123,7 +126,7 @@ static int mcu_gpiochip_add(struct mcu *mcu)
 	gc->can_sleep = 1;
 	gc->ngpio = MCU_NUM_GPIO;
 	gc->base = -1;
-	gc->set = mcu_gpio_set;
+	gc->set_rv = mcu_gpio_set;
 	gc->direction_output = mcu_gpio_dir_out;
 	gc->parent = dev;
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
index bc6bd4d0ae96..6a62ed6082c9 100644
--- a/arch/powerpc/platforms/83xx/suspend-asm.S
+++ b/arch/powerpc/platforms/83xx/suspend-asm.S
@@ -68,7 +68,8 @@ _GLOBAL(mpc83xx_enter_deep_sleep)
 
 	mfspr	r5, SPRN_HID0
 	mfspr	r6, SPRN_HID1
-	mfspr	r7, SPRN_HID2
+	/* FIXME: Should this use SPRN_HID2_G2_LE? */
+	mfspr	r7, SPRN_HID2_750FX
 
 	stw	r5, SS_HID+0(r3)
 	stw	r6, SS_HID+4(r3)
@@ -396,7 +397,8 @@ mpc83xx_deep_resume:
 
 	mtspr	SPRN_HID0, r5
 	mtspr	SPRN_HID1, r6
-	mtspr	SPRN_HID2, r7
+	/* FIXME: Should this use SPRN_HID2_G2_LE? */
+	mtspr	SPRN_HID2_750FX, r7
 
 	lwz	r4, SS_IABR+0(r3)
 	lwz	r5, SS_IABR+4(r3)
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 9315a3b69d6d..604c1b4b6d45 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -40,27 +40,6 @@ config BSC9132_QDS
 	  and dual StarCore SC3850 DSP cores.
 	  Manufacturer : Freescale Semiconductor, Inc
 
-config MPC8540_ADS
-	bool "Freescale MPC8540 ADS"
-	select DEFAULT_UIMAGE
-	help
-	  This option enables support for the MPC 8540 ADS board
-
-config MPC8560_ADS
-	bool "Freescale MPC8560 ADS"
-	select DEFAULT_UIMAGE
-	select CPM2
-	help
-	  This option enables support for the MPC 8560 ADS board
-
-config MPC85xx_CDS
-	bool "Freescale MPC85xx CDS"
-	select DEFAULT_UIMAGE
-	select PPC_I8259
-	select HAVE_RAPIDIO
-	help
-	  This option enables support for the MPC85xx CDS board
-
 config MPC85xx_MDS
 	bool "Freescale MPC8568 MDS / MPC8569 MDS / P1021 MDS"
 	select DEFAULT_UIMAGE
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 34ce21f42623..e635b27ee718 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -143,7 +143,7 @@ static struct platform_driver gpio_halt_driver = {
 		.of_match_table = gpio_halt_match,
 	},
 	.probe		= gpio_halt_probe,
-	.remove_new	= gpio_halt_remove,
+	.remove		= gpio_halt_remove,
 };
 
 module_platform_driver(gpio_halt_driver);
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 40aa58206888..32fa5fb557c0 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -23,7 +23,7 @@
 #include <asm/mpic.h>
 #include <asm/cacheflush.h>
 #include <asm/dbell.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/cputhreads.h>
 #include <asm/fsl_pm.h>
@@ -398,6 +398,7 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
 	hard_irq_disable();
 	mpic_teardown_this_cpu(secondary);
 
+#ifdef CONFIG_CRASH_DUMP
 	if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
 		/*
 		 * We enter the crash kernel on whatever cpu crashed,
@@ -406,9 +407,11 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
 		 */
 		disable_threadbit = 1;
 		disable_cpu = cpu_first_thread_sibling(cpu);
-	} else if (sibling != crashing_cpu &&
-		   cpu_thread_in_core(cpu) == 0 &&
-		   cpu_thread_in_core(sibling) != 0) {
+	} else if (sibling == crashing_cpu) {
+		return;
+	}
+#endif
+	if (cpu_thread_in_core(cpu) == 0 && cpu_thread_in_core(sibling) != 0) {
 		disable_threadbit = 2;
 		disable_cpu = sibling;
 	}
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 60e0b8947ce6..4b69fb321a68 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -83,7 +83,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq)
 		if (cause >> (i + 16))
 			break;
 	}
-	return irq_linear_revmap(socrates_fpga_pic_irq_host,
+	return irq_find_mapping(socrates_fpga_pic_irq_host,
 			(irq_hw_number_t)i);
 }
 
@@ -278,7 +278,7 @@ void __init socrates_fpga_pic_init(struct device_node *pic)
 	int i;
 
 	/* Setup an irq_domain structure */
-	socrates_fpga_pic_irq_host = irq_domain_add_linear(pic,
+	socrates_fpga_pic_irq_host = irq_domain_create_linear(of_fwnode_handle(pic),
 		    SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL);
 	if (socrates_fpga_pic_irq_host == NULL) {
 		pr_err("FPGA PIC: Unable to allocate host\n");
diff --git a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
index 767eed98a0a8..d4fbb6eff38a 100644
--- a/arch/powerpc/platforms/85xx/t1042rdb_diu.c
+++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
@@ -149,4 +149,5 @@ static int __init t1042rdb_diu_init(void)
 
 early_initcall(t1042rdb_diu_init);
 
+MODULE_DESCRIPTION("Freescale T1042 DIU driver");
 MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index 8a7e55acf090..9be33e41af6d 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -12,7 +12,7 @@
 #include <linux/delay.h>
 #include <linux/pgtable.h>
 
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/page.h>
 #include <asm/pci-bridge.h>
 #include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index a14d9d8997a4..8623aebfac48 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -195,6 +195,13 @@ config PIN_TLB_IMMR
 	  CONFIG_PIN_TLB_DATA is also selected, it will reduce
 	  CONFIG_PIN_TLB_DATA to 24 Mbytes.
 
+config PIN_TLB_TEXT
+	bool "Pinned TLB for TEXT"
+	depends on PIN_TLB
+	default y
+	help
+	  This pins kernel text with 8M pages.
+
 endmenu
 
 endmenu
diff --git a/arch/powerpc/platforms/8xx/cpm1-ic.c b/arch/powerpc/platforms/8xx/cpm1-ic.c
index a18fc7c99f83..a49d4a9ab3bc 100644
--- a/arch/powerpc/platforms/8xx/cpm1-ic.c
+++ b/arch/powerpc/platforms/8xx/cpm1-ic.c
@@ -59,7 +59,7 @@ static int cpm_get_irq(struct irq_desc *desc)
 	cpm_vec = in_be16(&data->reg->cpic_civr);
 	cpm_vec >>= 11;
 
-	return irq_linear_revmap(data->host, cpm_vec);
+	return irq_find_mapping(data->host, cpm_vec);
 }
 
 static void cpm_cascade(struct irq_desc *desc)
@@ -110,7 +110,8 @@ static int cpm_pic_probe(struct platform_device *pdev)
 
 	out_be32(&data->reg->cpic_cimr, 0);
 
-	data->host = irq_domain_add_linear(dev->of_node, 64, &cpm_pic_host_ops, data);
+	data->host = irq_domain_create_linear(of_fwnode_handle(dev->of_node),
+					      64, &cpm_pic_host_ops, data);
 	if (!data->host)
 		return -ENODEV;
diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
index b24d4102fbf6..7462c221115c 100644
--- a/arch/powerpc/platforms/8xx/cpm1.c
+++ b/arch/powerpc/platforms/8xx/cpm1.c
@@ -45,7 +45,7 @@
 #include <sysdev/fsl_soc.h>
 
 #ifdef CONFIG_8xx_GPIO
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
+#include <linux/gpio/driver.h>
 #endif
 
 #define CPM_MAP_SIZE    (0x4000)
@@ -376,7 +376,8 @@ int __init cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode)
 #ifdef CONFIG_8xx_GPIO
 
 struct cpm1_gpio16_chip {
-	struct of_mm_gpio_chip mm_gc;
+	struct gpio_chip gc;
+	void __iomem *regs;
 	spinlock_t lock;
 
 	/* shadowed data register to clear/set bits safely */
@@ -386,19 +387,17 @@ struct cpm1_gpio16_chip {
 	int irq[16];
 };
 
-static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
+static void cpm1_gpio16_save_regs(struct cpm1_gpio16_chip *cpm1_gc)
 {
-	struct cpm1_gpio16_chip *cpm1_gc =
-		container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
-	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
+	struct cpm_ioport16 __iomem *iop = cpm1_gc->regs;
 
 	cpm1_gc->cpdata = in_be16(&iop->dat);
 }
 
 static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio)
 {
-	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
+	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc);
+	struct cpm_ioport16 __iomem *iop = cpm1_gc->regs;
 	u16 pin_mask;
 
 	pin_mask = 1 << (15 - gpio);
@@ -406,11 +405,9 @@ static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio)
 	return !!(in_be16(&iop->dat) & pin_mask);
 }
 
-static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask,
-			      int value)
+static void __cpm1_gpio16_set(struct cpm1_gpio16_chip *cpm1_gc, u16 pin_mask, int value)
 {
-	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
-	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
+	struct cpm_ioport16 __iomem *iop = cpm1_gc->regs;
 
 	if (value)
 		cpm1_gc->cpdata |= pin_mask;
@@ -420,40 +417,39 @@ static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask,
 	out_be16(&iop->dat, cpm1_gc->cpdata);
 }
 
-static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
 {
-	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc);
 	unsigned long flags;
 	u16 pin_mask = 1 << (15 - gpio);
 
 	spin_lock_irqsave(&cpm1_gc->lock, flags);
 
-	__cpm1_gpio16_set(mm_gc, pin_mask, value);
+	__cpm1_gpio16_set(cpm1_gc, pin_mask, value);
 
 	spin_unlock_irqrestore(&cpm1_gc->lock, flags);
+
+	return 0;
 }
 
 static int cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio)
 {
-	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc);
 
 	return cpm1_gc->irq[gpio] ? : -ENXIO;
 }
 
 static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 {
-	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
-	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
+	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc);
+	struct cpm_ioport16 __iomem *iop = cpm1_gc->regs;
 	unsigned long flags;
 	u16 pin_mask = 1 << (15 - gpio);
 
 	spin_lock_irqsave(&cpm1_gc->lock, flags);
 
 	setbits16(&iop->dir, pin_mask);
-	__cpm1_gpio16_set(mm_gc, pin_mask, val);
+	__cpm1_gpio16_set(cpm1_gc, pin_mask, val);
 
 	spin_unlock_irqrestore(&cpm1_gc->lock, flags);
@@ -462,9 +458,8 @@ static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static int cpm1_gpio16_dir_in(struct gpio_chip *gc, unsigned int gpio)
 {
-	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
-	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
+	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc);
+	struct cpm_ioport16 __iomem *iop = cpm1_gc->regs;
 	unsigned long flags;
 	u16 pin_mask = 1 << (15 - gpio);
@@ -481,11 +476,10 @@ int cpm1_gpiochip_add16(struct device *dev)
 {
 	struct device_node *np = dev->of_node;
 	struct cpm1_gpio16_chip *cpm1_gc;
-	struct of_mm_gpio_chip *mm_gc;
 	struct gpio_chip *gc;
 	u16 mask;
 
-	cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
+	cpm1_gc = devm_kzalloc(dev, sizeof(*cpm1_gc), GFP_KERNEL);
 	if (!cpm1_gc)
 		return -ENOMEM;
@@ -499,43 +493,50 @@ int cpm1_gpiochip_add16(struct device *dev)
 		cpm1_gc->irq[i] = irq_of_parse_and_map(np, j++);
 	}
 
-	mm_gc = &cpm1_gc->mm_gc;
-	gc = &mm_gc->gc;
-
-	mm_gc->save_regs = cpm1_gpio16_save_regs;
+	gc = &cpm1_gc->gc;
+	gc->base = -1;
 	gc->ngpio = 16;
 	gc->direction_input = cpm1_gpio16_dir_in;
 	gc->direction_output = cpm1_gpio16_dir_out;
 	gc->get = cpm1_gpio16_get;
-	gc->set = cpm1_gpio16_set;
+	gc->set_rv = cpm1_gpio16_set;
 	gc->to_irq = cpm1_gpio16_to_irq;
 	gc->parent = dev;
 	gc->owner = THIS_MODULE;
 
-	return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
+	gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+	if (!gc->label)
+		return -ENOMEM;
+
+	cpm1_gc->regs = devm_of_iomap(dev, np, 0, NULL);
+	if (IS_ERR(cpm1_gc->regs))
+		return PTR_ERR(cpm1_gc->regs);
+
+	cpm1_gpio16_save_regs(cpm1_gc);
+
+	return devm_gpiochip_add_data(dev, gc, cpm1_gc);
 }
 
 struct cpm1_gpio32_chip {
-	struct of_mm_gpio_chip mm_gc;
+	struct gpio_chip gc;
+	void __iomem *regs;
 	spinlock_t lock;
 
 	/* shadowed data register to clear/set bits safely */
 	u32 cpdata;
 };
 
-static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
+static void cpm1_gpio32_save_regs(struct cpm1_gpio32_chip *cpm1_gc)
 {
-	struct cpm1_gpio32_chip *cpm1_gc =
-		container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
-	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
+	struct cpm_ioport32b __iomem *iop = cpm1_gc->regs;
 
 	cpm1_gc->cpdata = in_be32(&iop->dat);
 }
 
 static int cpm1_gpio32_get(struct gpio_chip *gc, unsigned int gpio)
 {
-	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
+	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc);
+	struct cpm_ioport32b __iomem *iop = cpm1_gc->regs;
 	u32 pin_mask;
 
 	pin_mask = 1 << (31 - gpio);
@@ -543,11 +544,9 @@ static int cpm1_gpio32_get(struct gpio_chip *gc, unsigned int gpio)
 	return !!(in_be32(&iop->dat) & pin_mask);
 }
 
-static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask,
-			      int value)
+static void __cpm1_gpio32_set(struct cpm1_gpio32_chip *cpm1_gc, u32 pin_mask, int value)
 {
-	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
-	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
+	struct cpm_ioport32b __iomem *iop = cpm1_gc->regs;
 
 	if (value)
 		cpm1_gc->cpdata |= pin_mask;
@@ -557,32 +556,32 @@ static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask,
 	out_be32(&iop->dat, cpm1_gc->cpdata);
 }
 
-static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
 {
-	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc);
 	unsigned long flags;
 	u32 pin_mask = 1 << (31 - gpio);
 
 	spin_lock_irqsave(&cpm1_gc->lock, flags);
 
-	__cpm1_gpio32_set(mm_gc, pin_mask, value);
+	__cpm1_gpio32_set(cpm1_gc, pin_mask, value);
 
 	spin_unlock_irqrestore(&cpm1_gc->lock, flags);
+
+	return 0;
 }
 
 static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 {
-	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
-	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
+	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc);
+	struct cpm_ioport32b __iomem *iop = cpm1_gc->regs;
 	unsigned long flags;
 	u32 pin_mask = 1 << (31 - gpio);
 
 	spin_lock_irqsave(&cpm1_gc->lock, flags);
 
 	setbits32(&iop->dir, pin_mask);
-	__cpm1_gpio32_set(mm_gc, pin_mask, val);
+	__cpm1_gpio32_set(cpm1_gc, pin_mask, val);
 
 	spin_unlock_irqrestore(&cpm1_gc->lock, flags);
@@ -591,9 +590,8 @@ static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static int cpm1_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio)
 {
-	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
-	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
+	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc);
+	struct cpm_ioport32b __iomem *iop = cpm1_gc->regs;
 	unsigned long flags;
 	u32 pin_mask = 1 << (31 - gpio);
@@ -610,28 +608,35 @@ int cpm1_gpiochip_add32(struct device *dev)
 {
 	struct device_node *np = dev->of_node;
 	struct cpm1_gpio32_chip *cpm1_gc;
-	struct of_mm_gpio_chip *mm_gc;
 	struct gpio_chip *gc;
 
-	cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
+	cpm1_gc = devm_kzalloc(dev, sizeof(*cpm1_gc), GFP_KERNEL);
 	if (!cpm1_gc)
 		return -ENOMEM;
 
 	spin_lock_init(&cpm1_gc->lock);
 
-	mm_gc = &cpm1_gc->mm_gc;
-	gc = &mm_gc->gc;
-
-	mm_gc->save_regs = cpm1_gpio32_save_regs;
+	gc = &cpm1_gc->gc;
+	gc->base = -1;
 	gc->ngpio = 32;
 	gc->direction_input = cpm1_gpio32_dir_in;
 	gc->direction_output = cpm1_gpio32_dir_out;
 	gc->get = cpm1_gpio32_get;
-	gc->set = cpm1_gpio32_set;
+	gc->set_rv = cpm1_gpio32_set;
 	gc->parent = dev;
 	gc->owner = THIS_MODULE;
 
-	return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
+	gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+	if (!gc->label)
+		return -ENOMEM;
+
+	cpm1_gc->regs = devm_of_iomap(dev, np, 0, NULL);
+	if (IS_ERR(cpm1_gc->regs))
+		return PTR_ERR(cpm1_gc->regs);
+
+	cpm1_gpio32_save_regs(cpm1_gc);
+
+	return devm_gpiochip_add_data(dev, gc, cpm1_gc);
 }
 
 #endif /* CONFIG_8xx_GPIO */
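The cpm1.c rework above drops the of_mm_gpio_chip wrapper for a plain gpio_chip with devm-managed allocation, mapping and registration, so there is no manual unwind path left in the probe flow. A condensed sketch of that registration pattern under the same assumptions (demo_* names are illustrative):

#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/of_address.h>

struct demo_bank {
	struct gpio_chip gc;
	void __iomem *regs;	/* mapped I/O port registers */
};

static int demo_gpiochip_add(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct demo_bank *bank;

	bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	/* devm_of_iomap() replaces the mapping of_mm_gpiochip_add_data()
	 * used to do internally; it is unmapped on driver detach. */
	bank->regs = devm_of_iomap(dev, np, 0, NULL);
	if (IS_ERR(bank->regs))
		return PTR_ERR(bank->regs);

	bank->gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
	if (!bank->gc.label)
		return -ENOMEM;

	bank->gc.base = -1;
	bank->gc.ngpio = 16;
	bank->gc.parent = dev;

	/* Unregistered and freed automatically with the device. */
	return devm_gpiochip_add_data(dev, &bank->gc, bank);
}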
-config UDBG_RTAS_CONSOLE - bool "RTAS based debug console" - depends on PPC_RTAS - config PPC_SMP_MUXED_IPI bool help @@ -188,12 +182,6 @@ config PPC_INDIRECT_PIO bool select GENERIC_IOMAP -config PPC_INDIRECT_MMIO - bool - -config PPC_IO_WORKAROUNDS - bool - source "drivers/cpufreq/Kconfig" menu "CPUIdle driver" diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index b2d8c0da2ad9..613b383ed8b3 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -43,19 +43,10 @@ config PPC_8xx select HAVE_ARCH_VMAP_STACK select HUGETLBFS -config 40x - bool "AMCC 40x" - select PPC_DCR_NATIVE - select PPC_UDBG_16550 - select 4xx_SOC - select HAVE_PCI - select PPC_KUEP if PPC_KUAP - config 44x bool "AMCC 44x, 46x or 47x" select PPC_DCR_NATIVE select PPC_UDBG_16550 - select 4xx_SOC select HAVE_PCI select PHYS_64BIT select PPC_KUEP @@ -93,11 +84,8 @@ config PPC_BOOK3S_64 bool "Server processors" select PPC_FPU select PPC_HAVE_PMU_SUPPORT - select HAVE_ARCH_TRANSPARENT_HUGEPAGE - select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION select ARCH_ENABLE_SPLIT_PMD_PTLOCK - select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE select ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_NUMA_BALANCING select HAVE_MOVE_PMD @@ -117,6 +105,14 @@ config PPC_BOOK3E_64 endchoice +config PPC_THP + def_bool y + depends on PPC_BOOK3S_64 + depends on PPC_RADIX_MMU || (PPC_64S_HASH_MMU && PAGE_SIZE_64KB) + select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + choice prompt "CPU selection" help @@ -194,11 +190,6 @@ config E6500_CPU depends on !CC_IS_CLANG select PPC_HAS_LBARX_LHARX -config 405_CPU - bool "40x family" - depends on 40x - depends on !CC_IS_CLANG - config 440_CPU bool "440 (44x family)" depends on 44x @@ -264,7 +255,6 @@ config TARGET_CPU default "e6500" if E6500_CPU default "power4" if POWERPC64_CPU && !CPU_LITTLE_ENDIAN default "power8" if POWERPC64_CPU && CPU_LITTLE_ENDIAN - default "405" if 405_CPU default "440" if 440_CPU default "464" if 464_CPU default "476" if 476_CPU @@ -340,7 +330,7 @@ config FSL_EMB_PERF_EVENT_E500 config 4xx bool - depends on 40x || 44x + depends on 44x default y config BOOKE @@ -348,11 +338,6 @@ config BOOKE depends on PPC_E500 || 44x default y -config BOOKE_OR_40x - bool - depends on BOOKE || 40x - default y - config PTE_64BIT bool depends on 44x || PPC_E500 || PPC_86xx @@ -464,6 +449,19 @@ config PPC_RADIX_MMU_DEFAULT If you're unsure, say Y. +config PPC_RADIX_BROADCAST_TLBIE + bool + depends on PPC_RADIX_MMU + help + Power ISA v3.0 and later implementations in the Linux Compliancy Subset + and lower are not required to implement broadcast TLBIE instructions. + Platforms with CPUs that do implement TLBIE broadcast, that is, where + a TLB invalidation instruction performed on one CPU operates on the + TLBs of all CPUs in the system, should select this option. If this + option is selected, the disable_tlbie kernel command line option can + be used to cause global TLB invalidations to be done via IPIs; without + it, IPIs will be used unconditionally. + config PPC_KERNEL_PREFIXED depends on PPC_HAVE_PREFIXED_SUPPORT depends on CC_HAS_PREFIXED @@ -495,8 +493,8 @@ config PPC_KERNEL_PCREL This option builds the kernel with the pc relative ABI model. 
config PPC_KUEP - bool "Kernel Userspace Execution Prevention" if !40x - default y if !40x + bool "Kernel Userspace Execution Prevention" + default y help Enable support for Kernel Userspace Execution Prevention (KUEP) @@ -582,7 +580,7 @@ config NR_CPUS config NOT_COHERENT_CACHE bool - depends on 4xx || PPC_8xx || PPC_MPC512x || \ + depends on 44x || PPC_8xx || PPC_MPC512x || \ GAMECUBE_COMMON || AMIGAONE select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_SYNC_DMA_FOR_DEVICE diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile index 94470fb27c99..3cee4a842736 100644 --- a/arch/powerpc/platforms/Makefile +++ b/arch/powerpc/platforms/Makefile @@ -4,8 +4,6 @@ obj-$(CONFIG_FSL_ULI1575) += fsl_uli1575.o obj-$(CONFIG_PPC_PMAC) += powermac/ obj-$(CONFIG_PPC_CHRP) += chrp/ -obj-$(CONFIG_4xx) += 4xx/ -obj-$(CONFIG_40x) += 40x/ obj-$(CONFIG_44x) += 44x/ obj-$(CONFIG_PPC_MPC512x) += 512x/ obj-$(CONFIG_PPC_MPC52xx) += 52xx/ @@ -16,7 +14,6 @@ obj-$(CONFIG_FSL_SOC_BOOKE) += 85xx/ obj-$(CONFIG_PPC_86xx) += 86xx/ obj-$(CONFIG_PPC_POWERNV) += powernv/ obj-$(CONFIG_PPC_PSERIES) += pseries/ -obj-$(CONFIG_PPC_MAPLE) += maple/ obj-$(CONFIG_PPC_PASEMI) += pasemi/ obj-$(CONFIG_PPC_CELL) += cell/ obj-$(CONFIG_PPC_PS3) += ps3/ diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c index 2c8dc0886912..33f852a7625f 100644 --- a/arch/powerpc/platforms/amigaone/setup.c +++ b/arch/powerpc/platforms/amigaone/setup.c @@ -109,7 +109,7 @@ static void __init amigaone_init_IRQ(void) i8259_init(pic, int_ack); ppc_md.get_irq = i8259_irq; - irq_set_default_host(i8259_get_host()); + irq_set_default_domain(i8259_get_host()); } static int __init request_isa_regions(void) diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c index f381b177ea06..dc6f75d3ac6e 100644 --- a/arch/powerpc/platforms/book3s/vas-api.c +++ b/arch/powerpc/platforms/book3s/vas-api.c @@ -464,7 +464,43 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } +/* + * During mmap() paste address, mapping VMA is saved in VAS window + * struct which is used to unmap during migration if the window is + * still open. But the user space can remove this mapping with + * munmap() before closing the window and the VMA address will + * be invalid. Set VAS window VMA to NULL in this function which + * is called before VMA free. + */ +static void vas_mmap_close(struct vm_area_struct *vma) +{ + struct file *fp = vma->vm_file; + struct coproc_instance *cp_inst = fp->private_data; + struct vas_window *txwin; + + /* Should not happen */ + if (!cp_inst || !cp_inst->txwin) { + pr_err("No attached VAS window for the paste address mmap\n"); + return; + } + + txwin = cp_inst->txwin; + /* + * task_ref.vma is set in coproc_mmap() during mmap paste + * address. So it has to be the same VMA that is getting freed. + */ + if (WARN_ON(txwin->task_ref.vma != vma)) { + pr_err("Invalid paste address mmaping\n"); + return; + } + + mutex_lock(&txwin->task_ref.mmap_mutex); + txwin->task_ref.vma = NULL; + mutex_unlock(&txwin->task_ref.mmap_mutex); +} + static const struct vm_operations_struct vas_vm_ops = { + .close = vas_mmap_close, .fault = vas_mmap_fault, }; @@ -485,6 +521,15 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma) return -EINVAL; } + /* + * Map complete page to the paste address. So the user + * space should pass 0ULL to the offset parameter. 
+ */ + if (vma->vm_pgoff) { + pr_debug("Page offset unsupported to map paste address\n"); + return -EINVAL; + } + /* Ensure instance has an open send window */ if (!txwin) { pr_err("No send window open?\n"); diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 34669b060f36..db65bfcd1e74 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -3,42 +3,6 @@ config PPC_CELL select PPC_64S_HASH_MMU if PPC64 bool -config PPC_CELL_COMMON - bool - select PPC_CELL - select PPC_DCR_MMIO - select PPC_INDIRECT_PIO - select PPC_INDIRECT_MMIO - select PPC_HASH_MMU_NATIVE - select PPC_RTAS - select IRQ_EDGE_EOI_HANDLER - -config PPC_CELL_NATIVE - bool - select PPC_CELL_COMMON - select MPIC - select PPC_IO_WORKAROUNDS - select IBM_EMAC_EMAC4 if IBM_EMAC - select IBM_EMAC_RGMII if IBM_EMAC - select IBM_EMAC_ZMII if IBM_EMAC #test only - select IBM_EMAC_TAH if IBM_EMAC #test only - -config PPC_IBM_CELL_BLADE - bool "IBM Cell Blade" - depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN - select PPC_CELL_NATIVE - select PPC_OF_PLATFORM_PCI - select FORCE_PCI - select MMIO_NVRAM - select PPC_UDBG_16550 - select UDBG_RTAS_CONSOLE - -config AXON_MSI - bool - depends on PPC_IBM_CELL_BLADE && PCI_MSI - select IRQ_DOMAIN_NOMAP - default y - menu "Cell Broadband Engine options" depends on PPC_CELL @@ -57,48 +21,4 @@ config SPU_BASE bool select PPC_COPRO_BASE -config CBE_RAS - bool "RAS features for bare metal Cell BE" - depends on PPC_CELL_NATIVE - default y - -config PPC_IBM_CELL_RESETBUTTON - bool "IBM Cell Blade Pinhole reset button" - depends on CBE_RAS && PPC_IBM_CELL_BLADE - default y - help - Support Pinhole Resetbutton on IBM Cell blades. - This adds a method to trigger system reset via front panel pinhole button. - -config PPC_IBM_CELL_POWERBUTTON - tristate "IBM Cell Blade power button" - depends on PPC_IBM_CELL_BLADE && INPUT_EVDEV - default y - help - Support Powerbutton on IBM Cell blades. - This will enable the powerbutton as an input device. - -config CBE_THERM - tristate "CBE thermal support" - default m - depends on CBE_RAS && SPU_BASE - -config PPC_PMI - tristate - default y - depends on CPU_FREQ_CBE_PMI || PPC_IBM_CELL_POWERBUTTON - help - PMI (Platform Management Interrupt) is a way to - communicate with the BMC (Baseboard Management Controller). - It is used in some IBM Cell blades. - -config CBE_CPUFREQ_SPU_GOVERNOR - tristate "CBE frequency scaling based on SPU usage" - depends on SPU_FS && CPU_FREQ - default m - help - This governor checks for spu usage to adjust the cpu frequency. - If no spu is running on a given cpu, that cpu will be throttled to - the minimal possible frequency. 
- endmenu diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile index 7ea6692f67e2..7e5ff239c376 100644 --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile @@ -1,27 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o - -obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ - pmu.o spider-pci.o -obj-$(CONFIG_CBE_RAS) += ras.o - -obj-$(CONFIG_CBE_THERM) += cbe_thermal.o -obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o - -obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o - -ifdef CONFIG_SMP -obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o -endif - -# needed only when building loadable spufs.ko -spu-priv1-$(CONFIG_PPC_CELL_COMMON) += spu_priv1_mmio.o -spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o - obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \ spu_syscalls.o \ - $(spu-priv1-y) \ - $(spu-manage-y) \ spufs/ - -obj-$(CONFIG_AXON_MSI) += axon_msi.o diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c deleted file mode 100644 index 28dc86744cac..000000000000 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ /dev/null @@ -1,481 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2007, Michael Ellerman, IBM Corporation. - */ - - -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/msi.h> -#include <linux/export.h> -#include <linux/slab.h> -#include <linux/debugfs.h> -#include <linux/of.h> -#include <linux/of_irq.h> -#include <linux/platform_device.h> - -#include <asm/dcr.h> -#include <asm/machdep.h> - -#include "cell.h" - -/* - * MSIC registers, specified as offsets from dcr_base - */ -#define MSIC_CTRL_REG 0x0 - -/* Base Address registers specify FIFO location in BE memory */ -#define MSIC_BASE_ADDR_HI_REG 0x3 -#define MSIC_BASE_ADDR_LO_REG 0x4 - -/* Hold the read/write offsets into the FIFO */ -#define MSIC_READ_OFFSET_REG 0x5 -#define MSIC_WRITE_OFFSET_REG 0x6 - - -/* MSIC control register flags */ -#define MSIC_CTRL_ENABLE 0x0001 -#define MSIC_CTRL_FIFO_FULL_ENABLE 0x0002 -#define MSIC_CTRL_IRQ_ENABLE 0x0008 -#define MSIC_CTRL_FULL_STOP_ENABLE 0x0010 - -/* - * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB. - * Currently we're using a 64KB FIFO size. - */ -#define MSIC_FIFO_SIZE_SHIFT 16 -#define MSIC_FIFO_SIZE_BYTES (1 << MSIC_FIFO_SIZE_SHIFT) - -/* - * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits - * 8-9 of the MSIC control reg. - */ -#define MSIC_CTRL_FIFO_SIZE (((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300) - -/* - * We need to mask the read/write offsets to make sure they stay within - * the bounds of the FIFO. Also they should always be 16-byte aligned. 
- */ -#define MSIC_FIFO_SIZE_MASK ((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu) - -/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */ -#define MSIC_FIFO_ENTRY_SIZE 0x10 - - -struct axon_msic { - struct irq_domain *irq_domain; - __le32 *fifo_virt; - dma_addr_t fifo_phys; - dcr_host_t dcr_host; - u32 read_offset; -#ifdef DEBUG - u32 __iomem *trigger; -#endif -}; - -#ifdef DEBUG -void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic); -#else -static inline void axon_msi_debug_setup(struct device_node *dn, - struct axon_msic *msic) { } -#endif - - -static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) -{ - pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n); - - dcr_write(msic->dcr_host, dcr_n, val); -} - -static void axon_msi_cascade(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct axon_msic *msic = irq_desc_get_handler_data(desc); - u32 write_offset, msi; - int idx; - int retry = 0; - - write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG); - pr_devel("axon_msi: original write_offset 0x%x\n", write_offset); - - /* write_offset doesn't wrap properly, so we have to mask it */ - write_offset &= MSIC_FIFO_SIZE_MASK; - - while (msic->read_offset != write_offset && retry < 100) { - idx = msic->read_offset / sizeof(__le32); - msi = le32_to_cpu(msic->fifo_virt[idx]); - msi &= 0xFFFF; - - pr_devel("axon_msi: woff %x roff %x msi %x\n", - write_offset, msic->read_offset, msi); - - if (msi < nr_irqs && irq_get_chip_data(msi) == msic) { - generic_handle_irq(msi); - msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); - } else { - /* - * Reading the MSIC_WRITE_OFFSET_REG does not - * reliably flush the outstanding DMA to the - * FIFO buffer. Here we were reading stale - * data, so we need to retry. 
- */ - udelay(1); - retry++; - pr_devel("axon_msi: invalid irq 0x%x!\n", msi); - continue; - } - - if (retry) { - pr_devel("axon_msi: late irq 0x%x, retry %d\n", - msi, retry); - retry = 0; - } - - msic->read_offset += MSIC_FIFO_ENTRY_SIZE; - msic->read_offset &= MSIC_FIFO_SIZE_MASK; - } - - if (retry) { - printk(KERN_WARNING "axon_msi: irq timed out\n"); - - msic->read_offset += MSIC_FIFO_ENTRY_SIZE; - msic->read_offset &= MSIC_FIFO_SIZE_MASK; - } - - chip->irq_eoi(&desc->irq_data); -} - -static struct axon_msic *find_msi_translator(struct pci_dev *dev) -{ - struct irq_domain *irq_domain; - struct device_node *dn, *tmp; - const phandle *ph; - struct axon_msic *msic = NULL; - - dn = of_node_get(pci_device_to_OF_node(dev)); - if (!dn) { - dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); - return NULL; - } - - for (; dn; dn = of_get_next_parent(dn)) { - ph = of_get_property(dn, "msi-translator", NULL); - if (ph) - break; - } - - if (!ph) { - dev_dbg(&dev->dev, - "axon_msi: no msi-translator property found\n"); - goto out_error; - } - - tmp = dn; - dn = of_find_node_by_phandle(*ph); - of_node_put(tmp); - if (!dn) { - dev_dbg(&dev->dev, - "axon_msi: msi-translator doesn't point to a node\n"); - goto out_error; - } - - irq_domain = irq_find_host(dn); - if (!irq_domain) { - dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n", - dn); - goto out_error; - } - - msic = irq_domain->host_data; - -out_error: - of_node_put(dn); - - return msic; -} - -static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg) -{ - struct device_node *dn; - int len; - const u32 *prop; - - dn = of_node_get(pci_device_to_OF_node(dev)); - if (!dn) { - dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); - return -ENODEV; - } - - for (; dn; dn = of_get_next_parent(dn)) { - if (!dev->no_64bit_msi) { - prop = of_get_property(dn, "msi-address-64", &len); - if (prop) - break; - } - - prop = of_get_property(dn, "msi-address-32", &len); - if (prop) - break; - } - - if (!prop) { - dev_dbg(&dev->dev, - "axon_msi: no msi-address-(32|64) properties found\n"); - of_node_put(dn); - return -ENOENT; - } - - switch (len) { - case 8: - msg->address_hi = prop[0]; - msg->address_lo = prop[1]; - break; - case 4: - msg->address_hi = 0; - msg->address_lo = prop[0]; - break; - default: - dev_dbg(&dev->dev, - "axon_msi: malformed msi-address-(32|64) property\n"); - of_node_put(dn); - return -EINVAL; - } - - of_node_put(dn); - - return 0; -} - -static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -{ - unsigned int virq, rc; - struct msi_desc *entry; - struct msi_msg msg; - struct axon_msic *msic; - - msic = find_msi_translator(dev); - if (!msic) - return -ENODEV; - - rc = setup_msi_msg_address(dev, &msg); - if (rc) - return rc; - - msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) { - virq = irq_create_direct_mapping(msic->irq_domain); - if (!virq) { - dev_warn(&dev->dev, - "axon_msi: virq allocation failed!\n"); - return -1; - } - dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); - - irq_set_msi_desc(virq, entry); - msg.data = virq; - pci_write_msi_msg(virq, &msg); - } - - return 0; -} - -static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) -{ - struct msi_desc *entry; - - dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n"); - - msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) { - irq_set_msi_desc(entry->irq, NULL); - irq_dispose_mapping(entry->irq); - entry->irq = 0; - } -} - -static struct irq_chip msic_irq_chip = { - .irq_mask = pci_msi_mask_irq, - 
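
/*
 * Annotation (not original code): the round trip that ties the setup path
 * above to axon_msi_cascade() is that the MSI payload is the Linux virq
 * itself. With an illustrative virq of 0x2c:
 *   TX: axon_msi_setup_msi_irqs() sets msg.data = 0x2c
 *   HW: the MSIC appends a 16-byte FIFO entry whose first __le32 is 0x2c
 *   RX: the cascade reads le32_to_cpu(fifo_virt[idx]) & 0xFFFF = 0x2c and
 *       calls generic_handle_irq(0x2c)
 * This is also why the irq_domain below is capped at 65536 entries: the
 * virq must fit in the 16 bits the hardware echoes back.
 */
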
.irq_unmask = pci_msi_unmask_irq, - .irq_shutdown = pci_msi_mask_irq, - .name = "AXON-MSI", -}; - -static int msic_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - irq_set_chip_data(virq, h->host_data); - irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); - - return 0; -} - -static const struct irq_domain_ops msic_host_ops = { - .map = msic_host_map, -}; - -static void axon_msi_shutdown(struct platform_device *device) -{ - struct axon_msic *msic = dev_get_drvdata(&device->dev); - u32 tmp; - - pr_devel("axon_msi: disabling %pOF\n", - irq_domain_get_of_node(msic->irq_domain)); - tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); - tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; - msic_dcr_write(msic, MSIC_CTRL_REG, tmp); -} - -static int axon_msi_probe(struct platform_device *device) -{ - struct device_node *dn = device->dev.of_node; - struct axon_msic *msic; - unsigned int virq; - int dcr_base, dcr_len; - - pr_devel("axon_msi: setting up dn %pOF\n", dn); - - msic = kzalloc(sizeof(*msic), GFP_KERNEL); - if (!msic) { - printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n", - dn); - goto out; - } - - dcr_base = dcr_resource_start(dn, 0); - dcr_len = dcr_resource_len(dn, 0); - - if (dcr_base == 0 || dcr_len == 0) { - printk(KERN_ERR - "axon_msi: couldn't parse dcr properties on %pOF\n", - dn); - goto out_free_msic; - } - - msic->dcr_host = dcr_map(dn, dcr_base, dcr_len); - if (!DCR_MAP_OK(msic->dcr_host)) { - printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n", - dn); - goto out_free_msic; - } - - msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, - &msic->fifo_phys, GFP_KERNEL); - if (!msic->fifo_virt) { - printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n", - dn); - goto out_free_msic; - } - - virq = irq_of_parse_and_map(dn, 0); - if (!virq) { - printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n", - dn); - goto out_free_fifo; - } - memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); - - /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */ - msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic); - if (!msic->irq_domain) { - printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n", - dn); - goto out_free_fifo; - } - - irq_set_handler_data(virq, msic); - irq_set_chained_handler(virq, axon_msi_cascade); - pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); - - /* Enable the MSIC hardware */ - msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32); - msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG, - msic->fifo_phys & 0xFFFFFFFF); - msic_dcr_write(msic, MSIC_CTRL_REG, - MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE | - MSIC_CTRL_FIFO_SIZE); - - msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG) - & MSIC_FIFO_SIZE_MASK; - - dev_set_drvdata(&device->dev, msic); - - cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs; - cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs; - - axon_msi_debug_setup(dn, msic); - - printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn); - - return 0; - -out_free_fifo: - dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt, - msic->fifo_phys); -out_free_msic: - kfree(msic); -out: - - return -1; -} - -static const struct of_device_id axon_msi_device_id[] = { - { - .compatible = "ibm,axon-msic" - }, - {} -}; - -static struct platform_driver axon_msi_driver = { - .probe = axon_msi_probe, - .shutdown = axon_msi_shutdown, - .driver = { - .name = 
"axon-msi", - .of_match_table = axon_msi_device_id, - }, -}; - -static int __init axon_msi_init(void) -{ - return platform_driver_register(&axon_msi_driver); -} -subsys_initcall(axon_msi_init); - - -#ifdef DEBUG -static int msic_set(void *data, u64 val) -{ - struct axon_msic *msic = data; - out_le32(msic->trigger, val); - return 0; -} - -static int msic_get(void *data, u64 *val) -{ - *val = 0; - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n"); - -void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic) -{ - char name[8]; - struct resource res; - - if (of_address_to_resource(dn, 0, &res)) { - pr_devel("axon_msi: couldn't get reg property\n"); - return; - } - - msic->trigger = ioremap(res.start, 0x4); - if (!msic->trigger) { - pr_devel("axon_msi: ioremap failed\n"); - return; - } - - snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn)); - - debugfs_create_file(name, 0600, arch_debugfs_dir, msic, &fops_msic); -} -#endif /* DEBUG */ diff --git a/arch/powerpc/platforms/cell/cbe_powerbutton.c b/arch/powerpc/platforms/cell/cbe_powerbutton.c deleted file mode 100644 index a3ee397486f6..000000000000 --- a/arch/powerpc/platforms/cell/cbe_powerbutton.c +++ /dev/null @@ -1,105 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * driver for powerbutton on IBM cell blades - * - * (C) Copyright IBM Corp. 2005-2008 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/input.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/platform_device.h> -#include <asm/pmi.h> - -static struct input_dev *button_dev; -static struct platform_device *button_pdev; - -static void cbe_powerbutton_handle_pmi(pmi_message_t pmi_msg) -{ - BUG_ON(pmi_msg.type != PMI_TYPE_POWER_BUTTON); - - input_report_key(button_dev, KEY_POWER, 1); - input_sync(button_dev); - input_report_key(button_dev, KEY_POWER, 0); - input_sync(button_dev); -} - -static struct pmi_handler cbe_pmi_handler = { - .type = PMI_TYPE_POWER_BUTTON, - .handle_pmi_message = cbe_powerbutton_handle_pmi, -}; - -static int __init cbe_powerbutton_init(void) -{ - int ret = 0; - struct input_dev *dev; - - if (!of_machine_is_compatible("IBM,CBPLUS-1.0")) { - printk(KERN_ERR "%s: Not a cell blade.\n", __func__); - ret = -ENODEV; - goto out; - } - - dev = input_allocate_device(); - if (!dev) { - ret = -ENOMEM; - printk(KERN_ERR "%s: Not enough memory.\n", __func__); - goto out; - } - - set_bit(EV_KEY, dev->evbit); - set_bit(KEY_POWER, dev->keybit); - - dev->name = "Power Button"; - dev->id.bustype = BUS_HOST; - - /* this makes the button look like an acpi power button - * no clue whether anyone relies on that though */ - dev->id.product = 0x02; - dev->phys = "LNXPWRBN/button/input0"; - - button_pdev = platform_device_register_simple("power_button", 0, NULL, 0); - if (IS_ERR(button_pdev)) { - ret = PTR_ERR(button_pdev); - goto out_free_input; - } - - dev->dev.parent = &button_pdev->dev; - ret = input_register_device(dev); - if (ret) { - printk(KERN_ERR "%s: Failed to register device\n", __func__); - goto out_free_pdev; - } - - button_dev = dev; - - ret = pmi_register_handler(&cbe_pmi_handler); - if (ret) { - printk(KERN_ERR "%s: Failed to register with pmi.\n", __func__); - goto out_free_pdev; - } - - goto out; - -out_free_pdev: - platform_device_unregister(button_pdev); -out_free_input: - input_free_device(dev); -out: - return ret; -} - -static void __exit cbe_powerbutton_exit(void) -{ - pmi_unregister_handler(&cbe_pmi_handler); - platform_device_unregister(button_pdev); 
- input_free_device(button_dev); -} - -module_init(cbe_powerbutton_init); -module_exit(cbe_powerbutton_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c deleted file mode 100644 index 99b3558753e9..000000000000 --- a/arch/powerpc/platforms/cell/cbe_regs.c +++ /dev/null @@ -1,298 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * cbe_regs.c - * - * Accessor routines for the various MMIO register blocks of the CBE - * - * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. - */ - -#include <linux/percpu.h> -#include <linux/types.h> -#include <linux/export.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/pgtable.h> - -#include <asm/io.h> -#include <asm/ptrace.h> -#include <asm/cell-regs.h> - -/* - * Current implementation uses "cpu" nodes. We build our own mapping - * array of cpu numbers to cpu nodes locally for now to allow interrupt - * time code to have a fast path rather than call of_get_cpu_node(). If - * we implement cpu hotplug, we'll have to install an appropriate notifier - * in order to release references to the cpu going away - */ -static struct cbe_regs_map -{ - struct device_node *cpu_node; - struct device_node *be_node; - struct cbe_pmd_regs __iomem *pmd_regs; - struct cbe_iic_regs __iomem *iic_regs; - struct cbe_mic_tm_regs __iomem *mic_tm_regs; - struct cbe_pmd_shadow_regs pmd_shadow_regs; -} cbe_regs_maps[MAX_CBE]; -static int cbe_regs_map_count; - -static struct cbe_thread_map -{ - struct device_node *cpu_node; - struct device_node *be_node; - struct cbe_regs_map *regs; - unsigned int thread_id; - unsigned int cbe_id; -} cbe_thread_map[NR_CPUS]; - -static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... 
MAX_CBE-1] = {CPU_BITS_NONE} }; -static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE }; - -static struct cbe_regs_map *cbe_find_map(struct device_node *np) -{ - int i; - struct device_node *tmp_np; - - if (!of_node_is_type(np, "spe")) { - for (i = 0; i < cbe_regs_map_count; i++) - if (cbe_regs_maps[i].cpu_node == np || - cbe_regs_maps[i].be_node == np) - return &cbe_regs_maps[i]; - return NULL; - } - - if (np->data) - return np->data; - - /* walk up path until cpu or be node was found */ - tmp_np = np; - do { - tmp_np = tmp_np->parent; - /* on a correct devicetree we wont get up to root */ - BUG_ON(!tmp_np); - } while (!of_node_is_type(tmp_np, "cpu") || - !of_node_is_type(tmp_np, "be")); - - np->data = cbe_find_map(tmp_np); - - return np->data; -} - -struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np) -{ - struct cbe_regs_map *map = cbe_find_map(np); - if (map == NULL) - return NULL; - return map->pmd_regs; -} -EXPORT_SYMBOL_GPL(cbe_get_pmd_regs); - -struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu) -{ - struct cbe_regs_map *map = cbe_thread_map[cpu].regs; - if (map == NULL) - return NULL; - return map->pmd_regs; -} -EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs); - -struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np) -{ - struct cbe_regs_map *map = cbe_find_map(np); - if (map == NULL) - return NULL; - return &map->pmd_shadow_regs; -} - -struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu) -{ - struct cbe_regs_map *map = cbe_thread_map[cpu].regs; - if (map == NULL) - return NULL; - return &map->pmd_shadow_regs; -} - -struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np) -{ - struct cbe_regs_map *map = cbe_find_map(np); - if (map == NULL) - return NULL; - return map->iic_regs; -} - -struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu) -{ - struct cbe_regs_map *map = cbe_thread_map[cpu].regs; - if (map == NULL) - return NULL; - return map->iic_regs; -} - -struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np) -{ - struct cbe_regs_map *map = cbe_find_map(np); - if (map == NULL) - return NULL; - return map->mic_tm_regs; -} - -struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu) -{ - struct cbe_regs_map *map = cbe_thread_map[cpu].regs; - if (map == NULL) - return NULL; - return map->mic_tm_regs; -} -EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs); - -u32 cbe_get_hw_thread_id(int cpu) -{ - return cbe_thread_map[cpu].thread_id; -} -EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id); - -u32 cbe_cpu_to_node(int cpu) -{ - return cbe_thread_map[cpu].cbe_id; -} -EXPORT_SYMBOL_GPL(cbe_cpu_to_node); - -u32 cbe_node_to_cpu(int node) -{ - return cpumask_first(&cbe_local_mask[node]); - -} -EXPORT_SYMBOL_GPL(cbe_node_to_cpu); - -static struct device_node *__init cbe_get_be_node(int cpu_id) -{ - struct device_node *np; - - for_each_node_by_type (np, "be") { - int len,i; - const phandle *cpu_handle; - - cpu_handle = of_get_property(np, "cpus", &len); - - /* - * the CAB SLOF tree is non compliant, so we just assume - * there is only one node - */ - if (WARN_ON_ONCE(!cpu_handle)) - return np; - - for (i = 0; i < len; i++) { - struct device_node *ch_np = of_find_node_by_phandle(cpu_handle[i]); - struct device_node *ci_np = of_get_cpu_node(cpu_id, NULL); - - of_node_put(ch_np); - of_node_put(ci_np); - - if (ch_np == ci_np) - return np; - } - } - - return NULL; -} - -static void __init cbe_fill_regs_map(struct cbe_regs_map *map) -{ - if(map->be_node) { - struct device_node *be, *np, *parent_np; - - be = map->be_node; 
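
The point of the cbe_thread_map[]/cbe_regs_maps[] arrays above is that interrupt-time code can reach its chip's registers by array index instead of a device-tree walk. A hypothetical caller, sketched under that assumption (cbe_get_cpu_pmd_regs() and the tm_tpr register are from the code removed in this patch):

#include <linux/smp.h>
#include <asm/io.h>
#include <asm/cell-regs.h>

static u64 read_local_chip_tpr(void)	/* e.g. from interrupt context */
{
	struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(smp_processor_id());

	return regs ? in_be64(&regs->tm_tpr.val) : 0;
}
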
- - for_each_node_by_type(np, "pervasive") { - parent_np = of_get_parent(np); - if (parent_np == be) - map->pmd_regs = of_iomap(np, 0); - of_node_put(parent_np); - } - - for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller") { - parent_np = of_get_parent(np); - if (parent_np == be) - map->iic_regs = of_iomap(np, 2); - of_node_put(parent_np); - } - - for_each_node_by_type(np, "mic-tm") { - parent_np = of_get_parent(np); - if (parent_np == be) - map->mic_tm_regs = of_iomap(np, 0); - of_node_put(parent_np); - } - } else { - struct device_node *cpu; - /* That hack must die die die ! */ - const struct address_prop { - unsigned long address; - unsigned int len; - } __attribute__((packed)) *prop; - - cpu = map->cpu_node; - - prop = of_get_property(cpu, "pervasive", NULL); - if (prop != NULL) - map->pmd_regs = ioremap(prop->address, prop->len); - - prop = of_get_property(cpu, "iic", NULL); - if (prop != NULL) - map->iic_regs = ioremap(prop->address, prop->len); - - prop = of_get_property(cpu, "mic-tm", NULL); - if (prop != NULL) - map->mic_tm_regs = ioremap(prop->address, prop->len); - } -} - - -void __init cbe_regs_init(void) -{ - int i; - unsigned int thread_id; - struct device_node *cpu; - - /* Build local fast map of CPUs */ - for_each_possible_cpu(i) { - cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id); - cbe_thread_map[i].be_node = cbe_get_be_node(i); - cbe_thread_map[i].thread_id = thread_id; - } - - /* Find maps for each device tree CPU */ - for_each_node_by_type(cpu, "cpu") { - struct cbe_regs_map *map; - unsigned int cbe_id; - - cbe_id = cbe_regs_map_count++; - map = &cbe_regs_maps[cbe_id]; - - if (cbe_regs_map_count > MAX_CBE) { - printk(KERN_ERR "cbe_regs: More BE chips than supported" - "!\n"); - cbe_regs_map_count--; - of_node_put(cpu); - return; - } - of_node_put(map->cpu_node); - map->cpu_node = of_node_get(cpu); - - for_each_possible_cpu(i) { - struct cbe_thread_map *thread = &cbe_thread_map[i]; - - if (thread->cpu_node == cpu) { - thread->regs = map; - thread->cbe_id = cbe_id; - map->be_node = thread->be_node; - cpumask_set_cpu(i, &cbe_local_mask[cbe_id]); - if(thread->thread_id == 0) - cpumask_set_cpu(i, &cbe_first_online_cpu); - } - } - - cbe_fill_regs_map(map); - } -} - diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c deleted file mode 100644 index 2f45428e32c8..000000000000 --- a/arch/powerpc/platforms/cell/cbe_thermal.c +++ /dev/null @@ -1,386 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * thermal support for the cell processor - * - * This module adds some sysfs attributes to cpu and spu nodes. - * Base for measurements are the digital thermal sensors (DTS) - * located on the chip. - * The accuracy is 2 degrees, starting from 65 up to 125 degrees celsius - * The attributes can be found under - * /sys/devices/system/cpu/cpuX/thermal - * /sys/devices/system/spu/spuX/thermal - * - * The following attributes are added for each node: - * temperature: - * contains the current temperature measured by the DTS - * throttle_begin: - * throttling begins when temperature is greater or equal to - * throttle_begin. Setting this value to 125 prevents throttling. - * throttle_end: - * throttling is being ceased, if the temperature is lower than - * throttle_end. Due to a delay between applying throttling and - * a reduced temperature this value should be less than throttle_begin. - * A value equal to throttle_begin provides only a very little hysteresis. 
- * throttle_full_stop: - * If the temperatrue is greater or equal to throttle_full_stop, - * full throttling is applied to the cpu or spu. This value should be - * greater than throttle_begin and throttle_end. Setting this value to - * 65 prevents the unit from running code at all. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/module.h> -#include <linux/device.h> -#include <linux/kernel.h> -#include <linux/cpu.h> -#include <linux/stringify.h> -#include <asm/spu.h> -#include <asm/io.h> -#include <asm/cell-regs.h> - -#include "spu_priv1_mmio.h" - -#define TEMP_MIN 65 -#define TEMP_MAX 125 - -#define DEVICE_PREFIX_ATTR(_prefix,_name,_mode) \ -struct device_attribute attr_ ## _prefix ## _ ## _name = { \ - .attr = { .name = __stringify(_name), .mode = _mode }, \ - .show = _prefix ## _show_ ## _name, \ - .store = _prefix ## _store_ ## _name, \ -}; - -static inline u8 reg_to_temp(u8 reg_value) -{ - return ((reg_value & 0x3f) << 1) + TEMP_MIN; -} - -static inline u8 temp_to_reg(u8 temp) -{ - return ((temp - TEMP_MIN) >> 1) & 0x3f; -} - -static struct cbe_pmd_regs __iomem *get_pmd_regs(struct device *dev) -{ - struct spu *spu; - - spu = container_of(dev, struct spu, dev); - - return cbe_get_pmd_regs(spu_devnode(spu)); -} - -/* returns the value for a given spu in a given register */ -static u8 spu_read_register_value(struct device *dev, union spe_reg __iomem *reg) -{ - union spe_reg value; - struct spu *spu; - - spu = container_of(dev, struct spu, dev); - value.val = in_be64(®->val); - - return value.spe[spu->spe_id]; -} - -static ssize_t spu_show_temp(struct device *dev, struct device_attribute *attr, - char *buf) -{ - u8 value; - struct cbe_pmd_regs __iomem *pmd_regs; - - pmd_regs = get_pmd_regs(dev); - - value = spu_read_register_value(dev, &pmd_regs->ts_ctsr1); - - return sprintf(buf, "%d\n", reg_to_temp(value)); -} - -static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, int pos) -{ - u64 value; - - value = in_be64(&pmd_regs->tm_tpr.val); - /* access the corresponding byte */ - value >>= pos; - value &= 0x3F; - - return sprintf(buf, "%d\n", reg_to_temp(value)); -} - -static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos) -{ - u64 reg_value; - unsigned int temp; - u64 new_value; - int ret; - - ret = sscanf(buf, "%u", &temp); - - if (ret != 1 || temp < TEMP_MIN || temp > TEMP_MAX) - return -EINVAL; - - new_value = temp_to_reg(temp); - - reg_value = in_be64(&pmd_regs->tm_tpr.val); - - /* zero out bits for new value */ - reg_value &= ~(0xffull << pos); - /* set bits to new value */ - reg_value |= new_value << pos; - - out_be64(&pmd_regs->tm_tpr.val, reg_value); - return size; -} - -static ssize_t spu_show_throttle_end(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(get_pmd_regs(dev), buf, 0); -} - -static ssize_t spu_show_throttle_begin(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(get_pmd_regs(dev), buf, 8); -} - -static ssize_t spu_show_throttle_full_stop(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(get_pmd_regs(dev), buf, 16); -} - -static ssize_t spu_store_throttle_end(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(get_pmd_regs(dev), buf, size, 0); -} - -static ssize_t spu_store_throttle_begin(struct device *dev, - struct 
device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(get_pmd_regs(dev), buf, size, 8); -} - -static ssize_t spu_store_throttle_full_stop(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(get_pmd_regs(dev), buf, size, 16); -} - -static ssize_t ppe_show_temp(struct device *dev, char *buf, int pos) -{ - struct cbe_pmd_regs __iomem *pmd_regs; - u64 value; - - pmd_regs = cbe_get_cpu_pmd_regs(dev->id); - value = in_be64(&pmd_regs->ts_ctsr2); - - value = (value >> pos) & 0x3f; - - return sprintf(buf, "%d\n", reg_to_temp(value)); -} - - -/* shows the temperature of the DTS on the PPE, - * located near the linear thermal sensor */ -static ssize_t ppe_show_temp0(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return ppe_show_temp(dev, buf, 32); -} - -/* shows the temperature of the second DTS on the PPE */ -static ssize_t ppe_show_temp1(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return ppe_show_temp(dev, buf, 0); -} - -static ssize_t ppe_show_throttle_end(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 32); -} - -static ssize_t ppe_show_throttle_begin(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 40); -} - -static ssize_t ppe_show_throttle_full_stop(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 48); -} - -static ssize_t ppe_store_throttle_end(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 32); -} - -static ssize_t ppe_store_throttle_begin(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 40); -} - -static ssize_t ppe_store_throttle_full_stop(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 48); -} - - -static struct device_attribute attr_spu_temperature = { - .attr = {.name = "temperature", .mode = 0400 }, - .show = spu_show_temp, -}; - -static DEVICE_PREFIX_ATTR(spu, throttle_end, 0600); -static DEVICE_PREFIX_ATTR(spu, throttle_begin, 0600); -static DEVICE_PREFIX_ATTR(spu, throttle_full_stop, 0600); - - -static struct attribute *spu_attributes[] = { - &attr_spu_temperature.attr, - &attr_spu_throttle_end.attr, - &attr_spu_throttle_begin.attr, - &attr_spu_throttle_full_stop.attr, - NULL, -}; - -static const struct attribute_group spu_attribute_group = { - .name = "thermal", - .attrs = spu_attributes, -}; - -static struct device_attribute attr_ppe_temperature0 = { - .attr = {.name = "temperature0", .mode = 0400 }, - .show = ppe_show_temp0, -}; - -static struct device_attribute attr_ppe_temperature1 = { - .attr = {.name = "temperature1", .mode = 0400 }, - .show = ppe_show_temp1, -}; - -static DEVICE_PREFIX_ATTR(ppe, throttle_end, 0600); -static DEVICE_PREFIX_ATTR(ppe, throttle_begin, 0600); -static DEVICE_PREFIX_ATTR(ppe, throttle_full_stop, 0600); - -static struct attribute *ppe_attributes[] = { - &attr_ppe_temperature0.attr, - &attr_ppe_temperature1.attr, - &attr_ppe_throttle_end.attr, - &attr_ppe_throttle_begin.attr, - &attr_ppe_throttle_full_stop.attr, - NULL, -}; - -static struct attribute_group ppe_attribute_group = { - .name 
= "thermal", - .attrs = ppe_attributes, -}; - -/* - * initialize throttling with default values - */ -static int __init init_default_values(void) -{ - int cpu; - struct cbe_pmd_regs __iomem *pmd_regs; - struct device *dev; - union ppe_spe_reg tpr; - union spe_reg str1; - u64 str2; - union spe_reg cr1; - u64 cr2; - - /* TPR defaults */ - /* ppe - * 1F - no full stop - * 08 - dynamic throttling starts if over 80 degrees - * 03 - dynamic throttling ceases if below 70 degrees */ - tpr.ppe = 0x1F0803; - /* spe - * 10 - full stopped when over 96 degrees - * 08 - dynamic throttling starts if over 80 degrees - * 03 - dynamic throttling ceases if below 70 degrees - */ - tpr.spe = 0x100803; - - /* STR defaults */ - /* str1 - * 10 - stop 16 of 32 cycles - */ - str1.val = 0x1010101010101010ull; - /* str2 - * 10 - stop 16 of 32 cycles - */ - str2 = 0x10; - - /* CR defaults */ - /* cr1 - * 4 - normal operation - */ - cr1.val = 0x0404040404040404ull; - /* cr2 - * 4 - normal operation - */ - cr2 = 0x04; - - for_each_possible_cpu (cpu) { - pr_debug("processing cpu %d\n", cpu); - dev = get_cpu_device(cpu); - - if (!dev) { - pr_info("invalid dev pointer for cbe_thermal\n"); - return -EINVAL; - } - - pmd_regs = cbe_get_cpu_pmd_regs(dev->id); - - if (!pmd_regs) { - pr_info("invalid CBE regs pointer for cbe_thermal\n"); - return -EINVAL; - } - - out_be64(&pmd_regs->tm_str2, str2); - out_be64(&pmd_regs->tm_str1.val, str1.val); - out_be64(&pmd_regs->tm_tpr.val, tpr.val); - out_be64(&pmd_regs->tm_cr1.val, cr1.val); - out_be64(&pmd_regs->tm_cr2, cr2); - } - - return 0; -} - - -static int __init thermal_init(void) -{ - int rc = init_default_values(); - - if (rc == 0) { - spu_add_dev_attr_group(&spu_attribute_group); - cpu_add_dev_attr_group(&ppe_attribute_group); - } - - return rc; -} -module_init(thermal_init); - -static void __exit thermal_exit(void) -{ - spu_remove_dev_attr_group(&spu_attribute_group); - cpu_remove_dev_attr_group(&ppe_attribute_group); -} -module_exit(thermal_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); - diff --git a/arch/powerpc/platforms/cell/cell.h b/arch/powerpc/platforms/cell/cell.h deleted file mode 100644 index d5142e905ab3..000000000000 --- a/arch/powerpc/platforms/cell/cell.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Cell Platform common data structures - * - * Copyright 2015, Daniel Axtens, IBM Corporation - */ - -#ifndef CELL_H -#define CELL_H - -#include <asm/pci-bridge.h> - -extern struct pci_controller_ops cell_pci_controller_ops; - -#endif diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c deleted file mode 100644 index ca7849e113d7..000000000000 --- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c +++ /dev/null @@ -1,133 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * spu aware cpufreq governor for the cell processor - * - * © Copyright IBM Corporation 2006-2008 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/cpufreq.h> -#include <linux/sched.h> -#include <linux/sched/loadavg.h> -#include <linux/module.h> -#include <linux/timer.h> -#include <linux/workqueue.h> -#include <linux/atomic.h> -#include <asm/machdep.h> -#include <asm/spu.h> - -#define POLL_TIME 100000 /* in µs */ -#define EXP 753 /* exp(-1) in fixed-point */ - -struct spu_gov_info_struct { - unsigned long busy_spus; /* fixed-point */ - struct cpufreq_policy *policy; - struct delayed_work work; - unsigned int poll_int; /* µs */ 
-}; -static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info); - -static int calc_freq(struct spu_gov_info_struct *info) -{ - int cpu; - int busy_spus; - - cpu = info->policy->cpu; - busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus); - - info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1); - pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n", - cpu, busy_spus, info->busy_spus); - - return info->policy->max * info->busy_spus / FIXED_1; -} - -static void spu_gov_work(struct work_struct *work) -{ - struct spu_gov_info_struct *info; - int delay; - unsigned long target_freq; - - info = container_of(work, struct spu_gov_info_struct, work.work); - - /* after cancel_delayed_work_sync we unset info->policy */ - BUG_ON(info->policy == NULL); - - target_freq = calc_freq(info); - __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H); - - delay = usecs_to_jiffies(info->poll_int); - schedule_delayed_work_on(info->policy->cpu, &info->work, delay); -} - -static void spu_gov_init_work(struct spu_gov_info_struct *info) -{ - int delay = usecs_to_jiffies(info->poll_int); - INIT_DEFERRABLE_WORK(&info->work, spu_gov_work); - schedule_delayed_work_on(info->policy->cpu, &info->work, delay); -} - -static void spu_gov_cancel_work(struct spu_gov_info_struct *info) -{ - cancel_delayed_work_sync(&info->work); -} - -static int spu_gov_start(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu); - struct spu_gov_info_struct *affected_info; - int i; - - if (!cpu_online(cpu)) { - printk(KERN_ERR "cpu %d is not online\n", cpu); - return -EINVAL; - } - - if (!policy->cur) { - printk(KERN_ERR "no cpu specified in policy\n"); - return -EINVAL; - } - - /* initialize spu_gov_info for all affected cpus */ - for_each_cpu(i, policy->cpus) { - affected_info = &per_cpu(spu_gov_info, i); - affected_info->policy = policy; - } - - info->poll_int = POLL_TIME; - - /* setup timer */ - spu_gov_init_work(info); - - return 0; -} - -static void spu_gov_stop(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu); - int i; - - /* cancel timer */ - spu_gov_cancel_work(info); - - /* clean spu_gov_info for all affected cpus */ - for_each_cpu (i, policy->cpus) { - info = &per_cpu(spu_gov_info, i); - info->policy = NULL; - } -} - -static struct cpufreq_governor spu_governor = { - .name = "spudemand", - .start = spu_gov_start, - .stop = spu_gov_stop, - .owner = THIS_MODULE, -}; -cpufreq_governor_init(spu_governor); -cpufreq_governor_exit(spu_governor); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c deleted file mode 100644 index 03ee8152ee97..000000000000 --- a/arch/powerpc/platforms/cell/interrupt.c +++ /dev/null @@ -1,390 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cell Internal Interrupt Controller - * - * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org) - * IBM, Corp. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * Author: Arnd Bergmann <arndb@de.ibm.com> - * - * TODO: - * - Fix various assumptions related to HW CPU numbers vs. 
linux CPU numbers - * vs node numbers in the setup code - * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from - * a non-active node to the active node) - */ - -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/irqdomain.h> -#include <linux/export.h> -#include <linux/percpu.h> -#include <linux/types.h> -#include <linux/ioport.h> -#include <linux/kernel_stat.h> -#include <linux/pgtable.h> -#include <linux/of_address.h> - -#include <asm/io.h> -#include <asm/ptrace.h> -#include <asm/machdep.h> -#include <asm/cell-regs.h> - -#include "interrupt.h" - -struct iic { - struct cbe_iic_thread_regs __iomem *regs; - u8 target_id; - u8 eoi_stack[16]; - int eoi_ptr; - struct device_node *node; -}; - -static DEFINE_PER_CPU(struct iic, cpu_iic); -#define IIC_NODE_COUNT 2 -static struct irq_domain *iic_host; - -/* Convert between "pending" bits and hw irq number */ -static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) -{ - unsigned char unit = bits.source & 0xf; - unsigned char node = bits.source >> 4; - unsigned char class = bits.class & 3; - - /* Decode IPIs */ - if (bits.flags & CBE_IIC_IRQ_IPI) - return IIC_IRQ_TYPE_IPI | (bits.prio >> 4); - else - return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit; -} - -static void iic_mask(struct irq_data *d) -{ -} - -static void iic_unmask(struct irq_data *d) -{ -} - -static void iic_eoi(struct irq_data *d) -{ - struct iic *iic = this_cpu_ptr(&cpu_iic); - out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); - BUG_ON(iic->eoi_ptr < 0); -} - -static struct irq_chip iic_chip = { - .name = "CELL-IIC", - .irq_mask = iic_mask, - .irq_unmask = iic_unmask, - .irq_eoi = iic_eoi, -}; - - -static void iic_ioexc_eoi(struct irq_data *d) -{ -} - -static void iic_ioexc_cascade(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct cbe_iic_regs __iomem *node_iic = - (void __iomem *)irq_desc_get_handler_data(desc); - unsigned int irq = irq_desc_get_irq(desc); - unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; - unsigned long bits, ack; - int cascade; - - for (;;) { - bits = in_be64(&node_iic->iic_is); - if (bits == 0) - break; - /* pre-ack edge interrupts */ - ack = bits & IIC_ISR_EDGE_MASK; - if (ack) - out_be64(&node_iic->iic_is, ack); - /* handle them */ - for (cascade = 63; cascade >= 0; cascade--) - if (bits & (0x8000000000000000UL >> cascade)) - generic_handle_domain_irq(iic_host, - base | cascade); - /* post-ack level interrupts */ - ack = bits & ~IIC_ISR_EDGE_MASK; - if (ack) - out_be64(&node_iic->iic_is, ack); - } - chip->irq_eoi(&desc->irq_data); -} - - -static struct irq_chip iic_ioexc_chip = { - .name = "CELL-IOEX", - .irq_mask = iic_mask, - .irq_unmask = iic_unmask, - .irq_eoi = iic_ioexc_eoi, -}; - -/* Get an IRQ number from the pending state register of the IIC */ -static unsigned int iic_get_irq(void) -{ - struct cbe_iic_pending_bits pending; - struct iic *iic; - unsigned int virq; - - iic = this_cpu_ptr(&cpu_iic); - *(unsigned long *) &pending = - in_be64((u64 __iomem *) &iic->regs->pending_destr); - if (!(pending.flags & CBE_IIC_IRQ_VALID)) - return 0; - virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending)); - if (!virq) - return 0; - iic->eoi_stack[++iic->eoi_ptr] = pending.prio; - BUG_ON(iic->eoi_ptr > 15); - return virq; -} - -void iic_setup_cpu(void) -{ - out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff); -} - -u8 iic_get_target_id(int cpu) -{ - return per_cpu(cpu_iic, cpu).target_id; -} - 
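
For reference, the packing done by iic_pending_to_hwnum() above, worked through with illustrative values (the IIC_IRQ_* constants are defined in interrupt.h, deleted later in this patch):

/* pending.source = 0x14 -> node 1, unit 0x4 (an SPU); class = 2 */
unsigned int node = 1, class = 2, unit = 0x4;
irq_hw_number_t hw = (node << IIC_IRQ_NODE_SHIFT)	/* 0x100 */
		   | (class << 4)			/* 0x020 */
		   | unit;				/* 0x004 => hw = 0x124 */
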
-EXPORT_SYMBOL_GPL(iic_get_target_id); - -#ifdef CONFIG_SMP - -/* Use the highest interrupt priorities for IPI */ -static inline int iic_msg_to_irq(int msg) -{ - return IIC_IRQ_TYPE_IPI + 0xf - msg; -} - -void iic_message_pass(int cpu, int msg) -{ - out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); -} - -static void iic_request_ipi(int msg) -{ - int virq; - - virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg)); - if (!virq) { - printk(KERN_ERR - "iic: failed to map IPI %s\n", smp_ipi_name[msg]); - return; - } - - /* - * If smp_request_message_ipi encounters an error it will notify - * the error. If a message is not needed it will return non-zero. - */ - if (smp_request_message_ipi(virq, msg)) - irq_dispose_mapping(virq); -} - -void iic_request_IPIs(void) -{ - iic_request_ipi(PPC_MSG_CALL_FUNCTION); - iic_request_ipi(PPC_MSG_RESCHEDULE); - iic_request_ipi(PPC_MSG_TICK_BROADCAST); - iic_request_ipi(PPC_MSG_NMI_IPI); -} - -#endif /* CONFIG_SMP */ - - -static int iic_host_match(struct irq_domain *h, struct device_node *node, - enum irq_domain_bus_token bus_token) -{ - return of_device_is_compatible(node, - "IBM,CBEA-Internal-Interrupt-Controller"); -} - -static int iic_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - switch (hw & IIC_IRQ_TYPE_MASK) { - case IIC_IRQ_TYPE_IPI: - irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq); - break; - case IIC_IRQ_TYPE_IOEXC: - irq_set_chip_and_handler(virq, &iic_ioexc_chip, - handle_edge_eoi_irq); - break; - default: - irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); - } - return 0; -} - -static int iic_host_xlate(struct irq_domain *h, struct device_node *ct, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_flags) - -{ - unsigned int node, ext, unit, class; - const u32 *val; - - if (!of_device_is_compatible(ct, - "IBM,CBEA-Internal-Interrupt-Controller")) - return -ENODEV; - if (intsize != 1) - return -ENODEV; - val = of_get_property(ct, "#interrupt-cells", NULL); - if (val == NULL || *val != 1) - return -ENODEV; - - node = intspec[0] >> 24; - ext = (intspec[0] >> 16) & 0xff; - class = (intspec[0] >> 8) & 0xff; - unit = intspec[0] & 0xff; - - /* Check if node is in supported range */ - if (node > 1) - return -EINVAL; - - /* Build up interrupt number, special case for IO exceptions */ - *out_hwirq = (node << IIC_IRQ_NODE_SHIFT); - if (unit == IIC_UNIT_IIC && class == 1) - *out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext; - else - *out_hwirq |= IIC_IRQ_TYPE_NORMAL | - (class << IIC_IRQ_CLASS_SHIFT) | unit; - - /* Dummy flags, ignored by iic code */ - *out_flags = IRQ_TYPE_EDGE_RISING; - - return 0; -} - -static const struct irq_domain_ops iic_host_ops = { - .match = iic_host_match, - .map = iic_host_map, - .xlate = iic_host_xlate, -}; - -static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr, - struct device_node *node) -{ - /* XXX FIXME: should locate the linux CPU number from the HW cpu - * number properly. We are lucky for now - */ - struct iic *iic = &per_cpu(cpu_iic, hw_cpu); - - iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs)); - BUG_ON(iic->regs == NULL); - - iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 
0xf : 0xe); - iic->eoi_stack[0] = 0xff; - iic->node = of_node_get(node); - out_be64(&iic->regs->prio, 0); - - printk(KERN_INFO "IIC for CPU %d target id 0x%x : %pOF\n", - hw_cpu, iic->target_id, node); -} - -static int __init setup_iic(void) -{ - struct device_node *dn; - struct resource r0, r1; - unsigned int node, cascade, found = 0; - struct cbe_iic_regs __iomem *node_iic; - const u32 *np; - - for_each_node_by_name(dn, "interrupt-controller") { - if (!of_device_is_compatible(dn, - "IBM,CBEA-Internal-Interrupt-Controller")) - continue; - np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL); - if (np == NULL) { - printk(KERN_WARNING "IIC: CPU association not found\n"); - of_node_put(dn); - return -ENODEV; - } - if (of_address_to_resource(dn, 0, &r0) || - of_address_to_resource(dn, 1, &r1)) { - printk(KERN_WARNING "IIC: Can't resolve addresses\n"); - of_node_put(dn); - return -ENODEV; - } - found++; - init_one_iic(np[0], r0.start, dn); - init_one_iic(np[1], r1.start, dn); - - /* Setup cascade for IO exceptions. XXX cleanup tricks to get - * node vs CPU etc... - * Note that we configure the IIC_IRR here with a hard coded - * priority of 1. We might want to improve that later. - */ - node = np[0] >> 1; - node_iic = cbe_get_cpu_iic_regs(np[0]); - cascade = node << IIC_IRQ_NODE_SHIFT; - cascade |= 1 << IIC_IRQ_CLASS_SHIFT; - cascade |= IIC_UNIT_IIC; - cascade = irq_create_mapping(iic_host, cascade); - if (!cascade) - continue; - /* - * irq_data is a generic pointer that gets passed back - * to us later, so the forced cast is fine. - */ - irq_set_handler_data(cascade, (void __force *)node_iic); - irq_set_chained_handler(cascade, iic_ioexc_cascade); - out_be64(&node_iic->iic_ir, - (1 << 12) /* priority */ | - (node << 4) /* dest node */ | - IIC_UNIT_THREAD_0 /* route them to thread 0 */); - /* Flush pending (make sure it triggers if there is - * anything pending - */ - out_be64(&node_iic->iic_is, 0xfffffffffffffffful); - } - - if (found) - return 0; - else - return -ENODEV; -} - -void __init iic_init_IRQ(void) -{ - /* Setup an irq host data structure */ - iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops, - NULL); - BUG_ON(iic_host == NULL); - irq_set_default_host(iic_host); - - /* Discover and initialize iics */ - if (setup_iic() < 0) - panic("IIC: Failed to initialize !\n"); - - /* Set master interrupt handling function */ - ppc_md.get_irq = iic_get_irq; - - /* Enable on current CPU */ - iic_setup_cpu(); -} - -void iic_set_interrupt_routing(int cpu, int thread, int priority) -{ - struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu); - u64 iic_ir = 0; - int node = cpu >> 1; - - /* Set which node and thread will handle the next interrupt */ - iic_ir |= CBE_IIC_IR_PRIO(priority) | - CBE_IIC_IR_DEST_NODE(node); - if (thread == 0) - iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0); - else - iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1); - out_be64(&iic_regs->iic_ir, iic_ir); -} diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h deleted file mode 100644 index a47902248541..000000000000 --- a/arch/powerpc/platforms/cell/interrupt.h +++ /dev/null @@ -1,90 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASM_CELL_PIC_H -#define ASM_CELL_PIC_H -#ifdef __KERNEL__ -/* - * Mapping of IIC pending bits into per-node interrupt numbers. - * - * Interrupt numbers are in the range 0...0x1ff where the top bit - * (0x100) represent the source node. 
Only 2 nodes are supported with - * the current code though it's trivial to extend that if necessary using - * higher level bits - * - * The bottom 8 bits are split into 2 type bits and 6 data bits that - * depend on the type: - * - * 00 (0x00 | data) : normal interrupt. data is (class << 4) | source - * 01 (0x40 | data) : IO exception. data is the exception number as - * defined by bit numbers in IIC_SR - * 10 (0x80 | data) : IPI. data is the IPI number (obtained from the priority) - * and node is always 0 (IPIs are per-cpu, their source is - * not relevant) - * 11 (0xc0 | data) : reserved - * - * In addition, interrupt number 0x80000000 is defined as always invalid - * (that is the node field is expected to never extend to move than 23 bits) - * - */ - -enum { - IIC_IRQ_INVALID = 0x80000000u, - IIC_IRQ_NODE_MASK = 0x100, - IIC_IRQ_NODE_SHIFT = 8, - IIC_IRQ_MAX = 0x1ff, - IIC_IRQ_TYPE_MASK = 0xc0, - IIC_IRQ_TYPE_NORMAL = 0x00, - IIC_IRQ_TYPE_IOEXC = 0x40, - IIC_IRQ_TYPE_IPI = 0x80, - IIC_IRQ_CLASS_SHIFT = 4, - IIC_IRQ_CLASS_0 = 0x00, - IIC_IRQ_CLASS_1 = 0x10, - IIC_IRQ_CLASS_2 = 0x20, - IIC_SOURCE_COUNT = 0x200, - - /* Here are defined the various source/dest units. Avoid using those - * definitions if you can, they are mostly here for reference - */ - IIC_UNIT_SPU_0 = 0x4, - IIC_UNIT_SPU_1 = 0x7, - IIC_UNIT_SPU_2 = 0x3, - IIC_UNIT_SPU_3 = 0x8, - IIC_UNIT_SPU_4 = 0x2, - IIC_UNIT_SPU_5 = 0x9, - IIC_UNIT_SPU_6 = 0x1, - IIC_UNIT_SPU_7 = 0xa, - IIC_UNIT_IOC_0 = 0x0, - IIC_UNIT_IOC_1 = 0xb, - IIC_UNIT_THREAD_0 = 0xe, /* target only */ - IIC_UNIT_THREAD_1 = 0xf, /* target only */ - IIC_UNIT_IIC = 0xe, /* source only (IO exceptions) */ - - /* Base numbers for the external interrupts */ - IIC_IRQ_EXT_IOIF0 = - IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_0, - IIC_IRQ_EXT_IOIF1 = - IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_1, - - /* Base numbers for the IIC_ISR interrupts */ - IIC_IRQ_IOEX_TMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 63, - IIC_IRQ_IOEX_PMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 62, - IIC_IRQ_IOEX_ATI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 61, - IIC_IRQ_IOEX_MATBFI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 60, - IIC_IRQ_IOEX_ELDI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 59, - - /* Which bits in IIC_ISR are edge sensitive */ - IIC_ISR_EDGE_MASK = 0x4ul, -}; - -extern void iic_init_IRQ(void); -extern void iic_message_pass(int cpu, int msg); -extern void iic_request_IPIs(void); -extern void iic_setup_cpu(void); - -extern u8 iic_get_target_id(int cpu); - -extern void spider_init_IRQ(void); - -extern void iic_set_interrupt_routing(int cpu, int thread, int priority); - -#endif -#endif /* ASM_CELL_PIC_H */ diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c deleted file mode 100644 index 1202a69b0a20..000000000000 --- a/arch/powerpc/platforms/cell/iommu.c +++ /dev/null @@ -1,1094 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * IOMMU implementation for Cell Broadband Processor Architecture - * - * (C) Copyright IBM Corporation 2006-2008 - * - * Author: Jeremy Kerr <jk@ozlabs.org> - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/irqdomain.h> -#include <linux/notifier.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/memblock.h> - -#include <asm/prom.h> -#include <asm/iommu.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <asm/udbg.h> -#include 
<asm/firmware.h> -#include <asm/cell-regs.h> - -#include "cell.h" -#include "interrupt.h" - -/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages - * instead of leaving them mapped to some dummy page. This can be - * enabled once the appropriate workarounds for spider bugs have - * been enabled - */ -#define CELL_IOMMU_REAL_UNMAP - -/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of - * IO PTEs based on the transfer direction. That can be enabled - * once spider-net has been fixed to pass the correct direction - * to the DMA mapping functions - */ -#define CELL_IOMMU_STRICT_PROTECTION - - -#define NR_IOMMUS 2 - -/* IOC mmap registers */ -#define IOC_Reg_Size 0x2000 - -#define IOC_IOPT_CacheInvd 0x908 -#define IOC_IOPT_CacheInvd_NE_Mask 0xffe0000000000000ul -#define IOC_IOPT_CacheInvd_IOPTE_Mask 0x000003fffffffff8ul -#define IOC_IOPT_CacheInvd_Busy 0x0000000000000001ul - -#define IOC_IOST_Origin 0x918 -#define IOC_IOST_Origin_E 0x8000000000000000ul -#define IOC_IOST_Origin_HW 0x0000000000000800ul -#define IOC_IOST_Origin_HL 0x0000000000000400ul - -#define IOC_IO_ExcpStat 0x920 -#define IOC_IO_ExcpStat_V 0x8000000000000000ul -#define IOC_IO_ExcpStat_SPF_Mask 0x6000000000000000ul -#define IOC_IO_ExcpStat_SPF_S 0x6000000000000000ul -#define IOC_IO_ExcpStat_SPF_P 0x2000000000000000ul -#define IOC_IO_ExcpStat_ADDR_Mask 0x00000007fffff000ul -#define IOC_IO_ExcpStat_RW_Mask 0x0000000000000800ul -#define IOC_IO_ExcpStat_IOID_Mask 0x00000000000007fful - -#define IOC_IO_ExcpMask 0x928 -#define IOC_IO_ExcpMask_SFE 0x4000000000000000ul -#define IOC_IO_ExcpMask_PFE 0x2000000000000000ul - -#define IOC_IOCmd_Offset 0x1000 - -#define IOC_IOCmd_Cfg 0xc00 -#define IOC_IOCmd_Cfg_TE 0x0000800000000000ul - - -/* Segment table entries */ -#define IOSTE_V 0x8000000000000000ul /* valid */ -#define IOSTE_H 0x4000000000000000ul /* cache hint */ -#define IOSTE_PT_Base_RPN_Mask 0x3ffffffffffff000ul /* base RPN of IOPT */ -#define IOSTE_NPPT_Mask 0x0000000000000fe0ul /* no. 
pages in IOPT */ -#define IOSTE_PS_Mask 0x0000000000000007ul /* page size */ -#define IOSTE_PS_4K 0x0000000000000001ul /* - 4kB */ -#define IOSTE_PS_64K 0x0000000000000003ul /* - 64kB */ -#define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */ -#define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */ - - -/* IOMMU sizing */ -#define IO_SEGMENT_SHIFT 28 -#define IO_PAGENO_BITS(shift) (IO_SEGMENT_SHIFT - (shift)) - -/* The high bit needs to be set on every DMA address */ -#define SPIDER_DMA_OFFSET 0x80000000ul - -struct iommu_window { - struct list_head list; - struct cbe_iommu *iommu; - unsigned long offset; - unsigned long size; - unsigned int ioid; - struct iommu_table table; -}; - -#define NAMESIZE 8 -struct cbe_iommu { - int nid; - char name[NAMESIZE]; - void __iomem *xlate_regs; - void __iomem *cmd_regs; - unsigned long *stab; - unsigned long *ptab; - void *pad_page; - struct list_head windows; -}; - -/* Static array of iommus, one per node - * each contains a list of windows, keyed from dma_window property - * - on bus setup, look for a matching window, or create one - * - on dev setup, assign iommu_table ptr - */ -static struct cbe_iommu iommus[NR_IOMMUS]; -static int cbe_nr_iommus; - -static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte, - long n_ptes) -{ - u64 __iomem *reg; - u64 val; - long n; - - reg = iommu->xlate_regs + IOC_IOPT_CacheInvd; - - while (n_ptes > 0) { - /* we can invalidate up to 1 << 11 PTEs at once */ - n = min(n_ptes, 1l << 11); - val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask) - | (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask) - | IOC_IOPT_CacheInvd_Busy; - - out_be64(reg, val); - while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy) - ; - - n_ptes -= n; - pte += n; - } -} - -static int tce_build_cell(struct iommu_table *tbl, long index, long npages, - unsigned long uaddr, enum dma_data_direction direction, - unsigned long attrs) -{ - int i; - unsigned long *io_pte, base_pte; - struct iommu_window *window = - container_of(tbl, struct iommu_window, table); - - /* implementing proper protection causes problems with the spidernet - * driver - check mapping directions later, but allow read & write by - * default for now.*/ -#ifdef CELL_IOMMU_STRICT_PROTECTION - /* to avoid referencing a global, we use a trick here to setup the - * protection bit. "prot" is setup to be 3 fields of 4 bits appended - * together for each of the 3 supported direction values. It is then - * shifted left so that the fields matching the desired direction - * lands on the appropriate bits, and other bits are masked out. 
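
/*
 * Annotation (not original code): decoded, the 0xc48 constant below is
 * three 4-bit permission fields, one per dma_data_direction value, and
 * the (52 + 4 * direction) shift selects one of them under the
 * CBE_IOPTE_PP_W | CBE_IOPTE_PP_R mask (bits 63 and 62):
 *   0xc48 << 52 -> nibble 0xc at bits 60-63 -> PP_W | PP_R (DMA_BIDIRECTIONAL = 0)
 *   0xc48 << 56 -> nibble 0x4 at bits 60-63 -> PP_R        (DMA_TO_DEVICE = 1, device reads memory)
 *   0xc48 << 60 -> nibble 0x8 at bits 60-63 -> PP_W        (DMA_FROM_DEVICE = 2, device writes memory)
 */
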
- */ - const unsigned long prot = 0xc48; - base_pte = - ((prot << (52 + 4 * direction)) & - (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) | - CBE_IOPTE_M | CBE_IOPTE_SO_RW | - (window->ioid & CBE_IOPTE_IOID_Mask); -#else - base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | - CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); -#endif - if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING)) - base_pte &= ~CBE_IOPTE_SO_RW; - - io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); - - for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift)) - io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask); - - mb(); - - invalidate_tce_cache(window->iommu, io_pte, npages); - - pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n", - index, npages, direction, base_pte); - return 0; -} - -static void tce_free_cell(struct iommu_table *tbl, long index, long npages) -{ - - int i; - unsigned long *io_pte, pte; - struct iommu_window *window = - container_of(tbl, struct iommu_window, table); - - pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages); - -#ifdef CELL_IOMMU_REAL_UNMAP - pte = 0; -#else - /* spider bridge does PCI reads after freeing - insert a mapping - * to a scratch page instead of an invalid entry */ - pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW | - __pa(window->iommu->pad_page) | - (window->ioid & CBE_IOPTE_IOID_Mask); -#endif - - io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); - - for (i = 0; i < npages; i++) - io_pte[i] = pte; - - mb(); - - invalidate_tce_cache(window->iommu, io_pte, npages); -} - -static irqreturn_t ioc_interrupt(int irq, void *data) -{ - unsigned long stat, spf; - struct cbe_iommu *iommu = data; - - stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); - spf = stat & IOC_IO_ExcpStat_SPF_Mask; - - /* Might want to rate limit it */ - printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat); - printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n", - !!(stat & IOC_IO_ExcpStat_V), - (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ', - (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ', - (stat & IOC_IO_ExcpStat_RW_Mask) ? 
"Read" : "Write", - (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask)); - printk(KERN_ERR " page=0x%016lx\n", - stat & IOC_IO_ExcpStat_ADDR_Mask); - - /* clear interrupt */ - stat &= ~IOC_IO_ExcpStat_V; - out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat); - - return IRQ_HANDLED; -} - -static int __init cell_iommu_find_ioc(int nid, unsigned long *base) -{ - struct device_node *np; - struct resource r; - - *base = 0; - - /* First look for new style /be nodes */ - for_each_node_by_name(np, "ioc") { - if (of_node_to_nid(np) != nid) - continue; - if (of_address_to_resource(np, 0, &r)) { - printk(KERN_ERR "iommu: can't get address for %pOF\n", - np); - continue; - } - *base = r.start; - of_node_put(np); - return 0; - } - - /* Ok, let's try the old way */ - for_each_node_by_type(np, "cpu") { - const unsigned int *nidp; - const unsigned long *tmp; - - nidp = of_get_property(np, "node-id", NULL); - if (nidp && *nidp == nid) { - tmp = of_get_property(np, "ioc-translation", NULL); - if (tmp) { - *base = *tmp; - of_node_put(np); - return 0; - } - } - } - - return -ENODEV; -} - -static void __init cell_iommu_setup_stab(struct cbe_iommu *iommu, - unsigned long dbase, unsigned long dsize, - unsigned long fbase, unsigned long fsize) -{ - struct page *page; - unsigned long segments, stab_size; - - segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT; - - pr_debug("%s: iommu[%d]: segments: %lu\n", - __func__, iommu->nid, segments); - - /* set up the segment table */ - stab_size = segments * sizeof(unsigned long); - page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size)); - BUG_ON(!page); - iommu->stab = page_address(page); - memset(iommu->stab, 0, stab_size); -} - -static unsigned long *__init cell_iommu_alloc_ptab(struct cbe_iommu *iommu, - unsigned long base, unsigned long size, unsigned long gap_base, - unsigned long gap_size, unsigned long page_shift) -{ - struct page *page; - int i; - unsigned long reg, segments, pages_per_segment, ptab_size, - n_pte_pages, start_seg, *ptab; - - start_seg = base >> IO_SEGMENT_SHIFT; - segments = size >> IO_SEGMENT_SHIFT; - pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift); - /* PTEs for each segment must start on a 4K boundary */ - pages_per_segment = max(pages_per_segment, - (1 << 12) / sizeof(unsigned long)); - - ptab_size = segments * pages_per_segment * sizeof(unsigned long); - pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__, - iommu->nid, ptab_size, get_order(ptab_size)); - page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size)); - BUG_ON(!page); - - ptab = page_address(page); - memset(ptab, 0, ptab_size); - - /* number of 4K pages needed for a page table */ - n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12; - - pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n", - __func__, iommu->nid, iommu->stab, ptab, - n_pte_pages); - - /* initialise the STEs */ - reg = IOSTE_V | ((n_pte_pages - 1) << 5); - - switch (page_shift) { - case 12: reg |= IOSTE_PS_4K; break; - case 16: reg |= IOSTE_PS_64K; break; - case 20: reg |= IOSTE_PS_1M; break; - case 24: reg |= IOSTE_PS_16M; break; - default: BUG(); - } - - gap_base = gap_base >> IO_SEGMENT_SHIFT; - gap_size = gap_size >> IO_SEGMENT_SHIFT; - - pr_debug("Setting up IOMMU stab:\n"); - for (i = start_seg; i < (start_seg + segments); i++) { - if (i >= gap_base && i < (gap_base + gap_size)) { - pr_debug("\toverlap at %d, skipping\n", i); - continue; - } - iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) * - (i - start_seg)); - 
pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]); - } - - return ptab; -} - -static void __init cell_iommu_enable_hardware(struct cbe_iommu *iommu) -{ - int ret; - unsigned long reg, xlate_base; - unsigned int virq; - - if (cell_iommu_find_ioc(iommu->nid, &xlate_base)) - panic("%s: missing IOC register mappings for node %d\n", - __func__, iommu->nid); - - iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size); - iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset; - - /* ensure that the STEs have updated */ - mb(); - - /* setup interrupts for the iommu. */ - reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); - out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, - reg & ~IOC_IO_ExcpStat_V); - out_be64(iommu->xlate_regs + IOC_IO_ExcpMask, - IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE); - - virq = irq_create_mapping(NULL, - IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT)); - BUG_ON(!virq); - - ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu); - BUG_ON(ret); - - /* set the IOC segment table origin register (and turn on the iommu) */ - reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW; - out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg); - in_be64(iommu->xlate_regs + IOC_IOST_Origin); - - /* turn on IO translation */ - reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE; - out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg); -} - -static void __init cell_iommu_setup_hardware(struct cbe_iommu *iommu, - unsigned long base, unsigned long size) -{ - cell_iommu_setup_stab(iommu, base, size, 0, 0); - iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0, - IOMMU_PAGE_SHIFT_4K); - cell_iommu_enable_hardware(iommu); -} - -#if 0/* Unused for now */ -static struct iommu_window *find_window(struct cbe_iommu *iommu, - unsigned long offset, unsigned long size) -{ - struct iommu_window *window; - - /* todo: check for overlapping (but not equal) windows) */ - - list_for_each_entry(window, &(iommu->windows), list) { - if (window->offset == offset && window->size == size) - return window; - } - - return NULL; -} -#endif - -static inline u32 cell_iommu_get_ioid(struct device_node *np) -{ - const u32 *ioid; - - ioid = of_get_property(np, "ioid", NULL); - if (ioid == NULL) { - printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n", - np); - return 0; - } - - return *ioid; -} - -static struct iommu_table_ops cell_iommu_ops = { - .set = tce_build_cell, - .clear = tce_free_cell -}; - -static struct iommu_window * __init -cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, - unsigned long offset, unsigned long size, - unsigned long pte_offset) -{ - struct iommu_window *window; - struct page *page; - u32 ioid; - - ioid = cell_iommu_get_ioid(np); - - window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid); - BUG_ON(window == NULL); - - window->offset = offset; - window->size = size; - window->ioid = ioid; - window->iommu = iommu; - - window->table.it_blocksize = 16; - window->table.it_base = (unsigned long)iommu->ptab; - window->table.it_index = iommu->nid; - window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K; - window->table.it_offset = - (offset >> window->table.it_page_shift) + pte_offset; - window->table.it_size = size >> window->table.it_page_shift; - window->table.it_ops = &cell_iommu_ops; - - if (!iommu_init_table(&window->table, iommu->nid, 0, 0)) - panic("Failed to initialize iommu table"); - - pr_debug("\tioid %d\n", window->ioid); - pr_debug("\tblocksize %ld\n", window->table.it_blocksize); - pr_debug("\tbase 0x%016lx\n", 
window->table.it_base); - pr_debug("\toffset 0x%lx\n", window->table.it_offset); - pr_debug("\tsize %ld\n", window->table.it_size); - - list_add(&window->list, &iommu->windows); - - if (offset != 0) - return window; - - /* We need to map and reserve the first IOMMU page since it's used - * by the spider workaround. In theory, we only need to do that when - * running on spider but it doesn't really matter. - * - * This code also assumes that we have a window that starts at 0, - * which is the case on all spider based blades. - */ - page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0); - BUG_ON(!page); - iommu->pad_page = page_address(page); - clear_page(iommu->pad_page); - - __set_bit(0, window->table.it_map); - tce_build_cell(&window->table, window->table.it_offset, 1, - (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0); - - return window; -} - -static struct cbe_iommu *cell_iommu_for_node(int nid) -{ - int i; - - for (i = 0; i < cbe_nr_iommus; i++) - if (iommus[i].nid == nid) - return &iommus[i]; - return NULL; -} - -static unsigned long cell_dma_nommu_offset; - -static unsigned long dma_iommu_fixed_base; -static bool cell_iommu_enabled; - -/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */ -bool iommu_fixed_is_weak; - -static struct iommu_table *cell_get_iommu_table(struct device *dev) -{ - struct iommu_window *window; - struct cbe_iommu *iommu; - - /* Current implementation uses the first window available in that - * node's iommu. We -might- do something smarter later though it may - * never be necessary - */ - iommu = cell_iommu_for_node(dev_to_node(dev)); - if (iommu == NULL || list_empty(&iommu->windows)) { - dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n", - dev->of_node, dev_to_node(dev)); - return NULL; - } - window = list_entry(iommu->windows.next, struct iommu_window, list); - - return &window->table; -} - -static u64 cell_iommu_get_fixed_address(struct device *dev); - -static void cell_dma_dev_setup(struct device *dev) -{ - if (cell_iommu_enabled) { - u64 addr = cell_iommu_get_fixed_address(dev); - - if (addr != OF_BAD_ADDR) - dev->archdata.dma_offset = addr + dma_iommu_fixed_base; - set_iommu_table_base(dev, cell_get_iommu_table(dev)); - } else { - dev->archdata.dma_offset = cell_dma_nommu_offset; - } -} - -static void cell_pci_dma_dev_setup(struct pci_dev *dev) -{ - cell_dma_dev_setup(&dev->dev); -} - -static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action, - void *data) -{ - struct device *dev = data; - - /* We are only interested in device addition */ - if (action != BUS_NOTIFY_ADD_DEVICE) - return 0; - - if (cell_iommu_enabled) - dev->dma_ops = &dma_iommu_ops; - cell_dma_dev_setup(dev); - return 0; -} - -static struct notifier_block cell_of_bus_notifier = { - .notifier_call = cell_of_bus_notify -}; - -static int __init cell_iommu_get_window(struct device_node *np, - unsigned long *base, - unsigned long *size) -{ - const __be32 *dma_window; - unsigned long index; - - /* Use ibm,dma-window if available, else, hard code ! 
*/ - dma_window = of_get_property(np, "ibm,dma-window", NULL); - if (dma_window == NULL) { - *base = 0; - *size = 0x80000000u; - return -ENODEV; - } - - of_parse_dma_window(np, dma_window, &index, base, size); - return 0; -} - -static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np) -{ - struct cbe_iommu *iommu; - int nid, i; - - /* Get node ID */ - nid = of_node_to_nid(np); - if (nid < 0) { - printk(KERN_ERR "iommu: failed to get node for %pOF\n", - np); - return NULL; - } - pr_debug("iommu: setting up iommu for node %d (%pOF)\n", - nid, np); - - /* XXX todo: If we can have multiple windows on the same IOMMU, which - * isn't the case today, we probably want here to check whether the - * iommu for that node is already setup. - * However, there might be issue with getting the size right so let's - * ignore that for now. We might want to completely get rid of the - * multiple window support since the cell iommu supports per-page ioids - */ - - if (cbe_nr_iommus >= NR_IOMMUS) { - printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n", - np); - return NULL; - } - - /* Init base fields */ - i = cbe_nr_iommus++; - iommu = &iommus[i]; - iommu->stab = NULL; - iommu->nid = nid; - snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i); - INIT_LIST_HEAD(&iommu->windows); - - return iommu; -} - -static void __init cell_iommu_init_one(struct device_node *np, - unsigned long offset) -{ - struct cbe_iommu *iommu; - unsigned long base, size; - - iommu = cell_iommu_alloc(np); - if (!iommu) - return; - - /* Obtain a window for it */ - cell_iommu_get_window(np, &base, &size); - - pr_debug("\ttranslating window 0x%lx...0x%lx\n", - base, base + size - 1); - - /* Initialize the hardware */ - cell_iommu_setup_hardware(iommu, base, size); - - /* Setup the iommu_table */ - cell_iommu_setup_window(iommu, np, base, size, - offset >> IOMMU_PAGE_SHIFT_4K); -} - -static void __init cell_disable_iommus(void) -{ - int node; - unsigned long base, val; - void __iomem *xregs, *cregs; - - /* Make sure IOC translation is disabled on all nodes */ - for_each_online_node(node) { - if (cell_iommu_find_ioc(node, &base)) - continue; - xregs = ioremap(base, IOC_Reg_Size); - if (xregs == NULL) - continue; - cregs = xregs + IOC_IOCmd_Offset; - - pr_debug("iommu: cleaning up iommu on node %d\n", node); - - out_be64(xregs + IOC_IOST_Origin, 0); - (void)in_be64(xregs + IOC_IOST_Origin); - val = in_be64(cregs + IOC_IOCmd_Cfg); - val &= ~IOC_IOCmd_Cfg_TE; - out_be64(cregs + IOC_IOCmd_Cfg, val); - (void)in_be64(cregs + IOC_IOCmd_Cfg); - - iounmap(xregs); - } -} - -static int __init cell_iommu_init_disabled(void) -{ - struct device_node *np = NULL; - unsigned long base = 0, size; - - /* When no iommu is present, we use direct DMA ops */ - - /* First make sure all IOC translation is turned off */ - cell_disable_iommus(); - - /* If we have no Axon, we set up the spider DMA magic offset */ - np = of_find_node_by_name(NULL, "axon"); - if (!np) - cell_dma_nommu_offset = SPIDER_DMA_OFFSET; - of_node_put(np); - - /* Now we need to check to see where the memory is mapped - * in PCI space. We assume that all busses use the same dma - * window which is always the case so far on Cell, thus we - * pick up the first pci-internal node we can find and check - * the DMA window from there. 
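 *
 * For reference (not from this file): the window itself comes from the
 * node's "ibm,dma-window" property, an (index, base, size) triple whose
 * cell layout follows the parent bus's #address-cells/#size-cells and
 * which of_parse_dma_window() decodes; when the property is absent,
 * cell_iommu_get_window() above falls back to a 2GB window at offset 0.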
- */ - for_each_node_by_name(np, "axon") { - if (np->parent == NULL || np->parent->parent != NULL) - continue; - if (cell_iommu_get_window(np, &base, &size) == 0) - break; - } - if (np == NULL) { - for_each_node_by_name(np, "pci-internal") { - if (np->parent == NULL || np->parent->parent != NULL) - continue; - if (cell_iommu_get_window(np, &base, &size) == 0) - break; - } - } - of_node_put(np); - - /* If we found a DMA window, we check if it's big enough to enclose - * all of physical memory. If not, we force enable IOMMU - */ - if (np && size < memblock_end_of_DRAM()) { - printk(KERN_WARNING "iommu: force-enabled, dma window" - " (%ldMB) smaller than total memory (%lldMB)\n", - size >> 20, memblock_end_of_DRAM() >> 20); - return -ENODEV; - } - - cell_dma_nommu_offset += base; - - if (cell_dma_nommu_offset != 0) - cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup; - - printk("iommu: disabled, direct DMA offset is 0x%lx\n", - cell_dma_nommu_offset); - - return 0; -} - -/* - * Fixed IOMMU mapping support - * - * This code adds support for setting up a fixed IOMMU mapping on certain - * cell machines. For 64-bit devices this avoids the performance overhead of - * mapping and unmapping pages at runtime. 32-bit devices are unable to use - * the fixed mapping. - * - * The fixed mapping is established at boot, and maps all of physical memory - * 1:1 into device space at some offset. On machines with < 30 GB of memory - * we setup the fixed mapping immediately above the normal IOMMU window. - * - * For example a machine with 4GB of memory would end up with the normal - * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In - * this case a 64-bit device wishing to DMA to 1GB would be told to DMA to - * 3GB, plus any offset required by firmware. The firmware offset is encoded - * in the "dma-ranges" property. - * - * On machines with 30GB or more of memory, we are unable to place the fixed - * mapping above the normal IOMMU window as we would run out of address space. - * Instead we move the normal IOMMU window to coincide with the hash page - * table, this region does not need to be part of the fixed mapping as no - * device should ever be DMA'ing to it. We then setup the fixed mapping - * from 0 to 32GB. 
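 *
 * As an illustrative sketch of the arithmetic (values taken from the
 * 4GB example above; the names are local to this comment):
 *
 *	u64 fbase    = 0x080000000ull;		// fixed window starts at 2GB
 *	u64 cpu_addr = 0x040000000ull;		// device wants to DMA to 1GB
 *	u64 dev_addr = cpu_addr + fbase;	// device is told to use 3GB
 *
 * plus whatever firmware offset the "dma-ranges" property adds, which
 * is what cell_iommu_get_fixed_address() below recovers.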
- */ - -static u64 cell_iommu_get_fixed_address(struct device *dev) -{ - u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR; - struct device_node *np; - const u32 *ranges = NULL; - int i, len, best, naddr, nsize, pna, range_size; - - /* We can be called for platform devices that have no of_node */ - np = of_node_get(dev->of_node); - if (!np) - goto out; - - while (1) { - naddr = of_n_addr_cells(np); - nsize = of_n_size_cells(np); - np = of_get_next_parent(np); - if (!np) - break; - - ranges = of_get_property(np, "dma-ranges", &len); - - /* Ignore empty ranges, they imply no translation required */ - if (ranges && len > 0) - break; - } - - if (!ranges) { - dev_dbg(dev, "iommu: no dma-ranges found\n"); - goto out; - } - - len /= sizeof(u32); - - pna = of_n_addr_cells(np); - range_size = naddr + nsize + pna; - - /* dma-ranges format: - * child addr : naddr cells - * parent addr : pna cells - * size : nsize cells - */ - for (i = 0, best = -1, best_size = 0; i < len; i += range_size) { - cpu_addr = of_translate_dma_address(np, ranges + i + naddr); - size = of_read_number(ranges + i + naddr + pna, nsize); - - if (cpu_addr == 0 && size > best_size) { - best = i; - best_size = size; - } - } - - if (best >= 0) { - dev_addr = of_read_number(ranges + best, naddr); - } else - dev_dbg(dev, "iommu: no suitable range found!\n"); - -out: - of_node_put(np); - - return dev_addr; -} - -static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask) -{ - return mask == DMA_BIT_MASK(64) && - cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR; -} - -static void __init insert_16M_pte(unsigned long addr, unsigned long *ptab, - unsigned long base_pte) -{ - unsigned long segment, offset; - - segment = addr >> IO_SEGMENT_SHIFT; - offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24)); - ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long)); - - pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n", - addr, ptab, segment, offset); - - ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask); -} - -static void __init cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu, - struct device_node *np, unsigned long dbase, unsigned long dsize, - unsigned long fbase, unsigned long fsize) -{ - unsigned long base_pte, uaddr, ioaddr, *ptab; - - ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24); - - dma_iommu_fixed_base = fbase; - - pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase); - - base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | - (cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask); - - if (iommu_fixed_is_weak) - pr_info("IOMMU: Using weak ordering for fixed mapping\n"); - else { - pr_info("IOMMU: Using strong ordering for fixed mapping\n"); - base_pte |= CBE_IOPTE_SO_RW; - } - - for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) { - /* Don't touch the dynamic region */ - ioaddr = uaddr + fbase; - if (ioaddr >= dbase && ioaddr < (dbase + dsize)) { - pr_debug("iommu: fixed/dynamic overlap, skipping\n"); - continue; - } - - insert_16M_pte(uaddr, ptab, base_pte); - } - - mb(); -} - -static int __init cell_iommu_fixed_mapping_init(void) -{ - unsigned long dbase, dsize, fbase, fsize, hbase, hend; - struct cbe_iommu *iommu; - struct device_node *np; - - /* The fixed mapping is only supported on axon machines */ - np = of_find_node_by_name(NULL, "axon"); - of_node_put(np); - - if (!np) { - pr_debug("iommu: fixed mapping disabled, no axons found\n"); - return -1; - } - - /* We must have dma-ranges properties for fixed mapping to work */ - np = 
of_find_node_with_property(NULL, "dma-ranges"); - of_node_put(np); - - if (!np) { - pr_debug("iommu: no dma-ranges found, no fixed mapping\n"); - return -1; - } - - /* The default setup is to have the fixed mapping sit after the - * dynamic region, so find the top of the largest IOMMU window - * on any axon, then add the size of RAM and that's our max value. - * If that is > 32GB we have to do other shenanigans. - */ - fbase = 0; - for_each_node_by_name(np, "axon") { - cell_iommu_get_window(np, &dbase, &dsize); - fbase = max(fbase, dbase + dsize); - } - - fbase = ALIGN(fbase, 1 << IO_SEGMENT_SHIFT); - fsize = memblock_phys_mem_size(); - - if ((fbase + fsize) <= 0x800000000ul) - hbase = 0; /* use the device tree window */ - else { - /* If we're over 32 GB we need to cheat. We can't map all of - * RAM with the fixed mapping, and also fit the dynamic - * region. So try to place the dynamic region where the hash - * table sits, drivers never need to DMA to it, so we don't - * need a fixed mapping for that area. - */ - if (!htab_address) { - pr_debug("iommu: htab is NULL, on LPAR? Huh?\n"); - return -1; - } - hbase = __pa(htab_address); - hend = hbase + htab_size_bytes; - - /* The window must start and end on a segment boundary */ - if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) || - (hend != ALIGN(hend, 1 << IO_SEGMENT_SHIFT))) { - pr_debug("iommu: hash window not segment aligned\n"); - return -1; - } - - /* Check the hash window fits inside the real DMA window */ - for_each_node_by_name(np, "axon") { - cell_iommu_get_window(np, &dbase, &dsize); - - if (hbase < dbase || (hend > (dbase + dsize))) { - pr_debug("iommu: hash window doesn't fit in " - "real DMA window\n"); - of_node_put(np); - return -1; - } - } - - fbase = 0; - } - - /* Set up the dynamic regions */ - for_each_node_by_name(np, "axon") { - iommu = cell_iommu_alloc(np); - BUG_ON(!iommu); - - if (hbase == 0) - cell_iommu_get_window(np, &dbase, &dsize); - else { - dbase = hbase; - dsize = htab_size_bytes; - } - - printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx " - "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase, - dbase + dsize, fbase, fbase + fsize); - - cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize); - iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0, - IOMMU_PAGE_SHIFT_4K); - cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, - fbase, fsize); - cell_iommu_enable_hardware(iommu); - cell_iommu_setup_window(iommu, np, dbase, dsize, 0); - } - - cell_pci_controller_ops.iommu_bypass_supported = - cell_pci_iommu_bypass_supported; - return 0; -} - -static int iommu_fixed_disabled; - -static int __init setup_iommu_fixed(char *str) -{ - struct device_node *pciep; - - if (strcmp(str, "off") == 0) - iommu_fixed_disabled = 1; - - /* If we can find a pcie-endpoint in the device tree assume that - * we're on a triblade or a CAB so by default the fixed mapping - * should be set to be weakly ordered; but only if the boot - * option WASN'T set for strong ordering - */ - pciep = of_find_node_by_type(NULL, "pcie-endpoint"); - - if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) - iommu_fixed_is_weak = true; - - of_node_put(pciep); - - return 1; -} -__setup("iommu_fixed=", setup_iommu_fixed); - -static int __init cell_iommu_init(void) -{ - struct device_node *np; - - /* If IOMMU is disabled or we have little enough RAM to not need - * to enable it, we set up a direct mapping. - * - * Note: should we make sure we have the IOMMU actually disabled ? 
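 * (cell_iommu_init_disabled() below does begin by calling
 * cell_disable_iommus(), so IOC translation is switched off on every
 * node before the direct DMA offset is chosen.)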
- */ - if (iommu_is_off || - (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull)) - if (cell_iommu_init_disabled() == 0) - goto bail; - - /* Setup various callbacks */ - cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup; - - if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0) - goto done; - - /* Create an iommu for each /axon node. */ - for_each_node_by_name(np, "axon") { - if (np->parent == NULL || np->parent->parent != NULL) - continue; - cell_iommu_init_one(np, 0); - } - - /* Create an iommu for each toplevel /pci-internal node for - * old hardware/firmware - */ - for_each_node_by_name(np, "pci-internal") { - if (np->parent == NULL || np->parent->parent != NULL) - continue; - cell_iommu_init_one(np, SPIDER_DMA_OFFSET); - } - done: - /* Setup default PCI iommu ops */ - set_pci_dma_ops(&dma_iommu_ops); - cell_iommu_enabled = true; - bail: - /* Register callbacks on OF platform device addition/removal - * to handle linking them to the right DMA operations - */ - bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier); - - return 0; -} -machine_arch_initcall(cell, cell_iommu_init); diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c deleted file mode 100644 index 58d967ee38b3..000000000000 --- a/arch/powerpc/platforms/cell/pervasive.c +++ /dev/null @@ -1,125 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * CBE Pervasive Monitor and Debug - * - * (C) Copyright IBM Corporation 2005 - * - * Authors: Maximino Aguilar (maguilar@us.ibm.com) - * Michael N. Day (mnday@us.ibm.com) - */ - -#undef DEBUG - -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/percpu.h> -#include <linux/types.h> -#include <linux/kallsyms.h> -#include <linux/pgtable.h> - -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/reg.h> -#include <asm/cell-regs.h> -#include <asm/cpu_has_feature.h> - -#include "pervasive.h" -#include "ras.h" - -static void cbe_power_save(void) -{ - unsigned long ctrl, thread_switch_control; - - /* Ensure our interrupt state is properly tracked */ - if (!prep_irq_for_idle()) - return; - - ctrl = mfspr(SPRN_CTRLF); - - /* Enable DEC and EE interrupt request */ - thread_switch_control = mfspr(SPRN_TSC_CELL); - thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST; - - switch (ctrl & CTRL_CT) { - case CTRL_CT0: - thread_switch_control |= TSC_CELL_DEC_ENABLE_0; - break; - case CTRL_CT1: - thread_switch_control |= TSC_CELL_DEC_ENABLE_1; - break; - default: - printk(KERN_WARNING "%s: unknown configuration\n", - __func__); - break; - } - mtspr(SPRN_TSC_CELL, thread_switch_control); - - /* - * go into low thread priority, medium priority will be - * restored for us after wake-up. - */ - HMT_low(); - - /* - * atomically disable thread execution and runlatch. - * External and Decrementer exceptions are still handled when the - * thread is disabled but now enter in cbe_system_reset_exception() - */ - ctrl &= ~(CTRL_RUNLATCH | CTRL_TE); - mtspr(SPRN_CTRLT, ctrl); - - /* Re-enable interrupts in MSR */ - __hard_irq_enable(); -} - -static int cbe_system_reset_exception(struct pt_regs *regs) -{ - switch (regs->msr & SRR1_WAKEMASK) { - case SRR1_WAKEDEC: - set_dec(1); - break; - case SRR1_WAKEEE: - /* - * Handle these when interrupts get re-enabled and we take - * them as regular exceptions. We are in an NMI context - * and can't handle these here. 
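 * (For the SRR1_WAKEDEC case above, set_dec(1) re-arms the
 * decrementer so the timer interrupt is taken as an ordinary
 * exception as soon as interrupts are enabled again.)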
- */ - break; - case SRR1_WAKEMT: - return cbe_sysreset_hack(); -#ifdef CONFIG_CBE_RAS - case SRR1_WAKESYSERR: - cbe_system_error_exception(regs); - break; - case SRR1_WAKETHERM: - cbe_thermal_exception(regs); - break; -#endif /* CONFIG_CBE_RAS */ - default: - /* do system reset */ - return 0; - } - /* everything handled */ - return 1; -} - -void __init cbe_pervasive_init(void) -{ - int cpu; - - if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO)) - return; - - for_each_possible_cpu(cpu) { - struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu); - if (!regs) - continue; - - /* Enable Pause(0) control bit */ - out_be64(®s->pmcr, in_be64(®s->pmcr) | - CBE_PMD_PAUSE_ZERO_CONTROL); - } - - ppc_md.power_save = cbe_power_save; - ppc_md.system_reset_exception = cbe_system_reset_exception; -} diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h deleted file mode 100644 index 0da74ab10716..000000000000 --- a/arch/powerpc/platforms/cell/pervasive.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Cell Pervasive Monitor and Debug interface and HW structures - * - * (C) Copyright IBM Corporation 2005 - * - * Authors: Maximino Aguilar (maguilar@us.ibm.com) - * David J. Erb (djerb@us.ibm.com) - */ - - -#ifndef PERVASIVE_H -#define PERVASIVE_H - -extern void cbe_pervasive_init(void); - -#ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON -extern int cbe_sysreset_hack(void); -#else -static inline int cbe_sysreset_hack(void) -{ - return 1; -} -#endif /* CONFIG_PPC_IBM_CELL_RESETBUTTON */ - -#endif diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c deleted file mode 100644 index b207a7f99be5..000000000000 --- a/arch/powerpc/platforms/cell/pmu.c +++ /dev/null @@ -1,412 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cell Broadband Engine Performance Monitor - * - * (C) Copyright IBM Corporation 2001,2006 - * - * Author: - * David Erb (djerb@us.ibm.com) - * Kevin Corry (kevcorry@us.ibm.com) - */ - -#include <linux/interrupt.h> -#include <linux/irqdomain.h> -#include <linux/types.h> -#include <linux/export.h> -#include <asm/io.h> -#include <asm/irq_regs.h> -#include <asm/machdep.h> -#include <asm/pmc.h> -#include <asm/reg.h> -#include <asm/spu.h> -#include <asm/cell-regs.h> - -#include "interrupt.h" - -/* - * When writing to write-only mmio addresses, save a shadow copy. All of the - * registers are 32-bit, but stored in the upper-half of a 64-bit field in - * pmd_regs. - */ - -#define WRITE_WO_MMIO(reg, x) \ - do { \ - u32 _x = (x); \ - struct cbe_pmd_regs __iomem *pmd_regs; \ - struct cbe_pmd_shadow_regs *shadow_regs; \ - pmd_regs = cbe_get_cpu_pmd_regs(cpu); \ - shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \ - out_be64(&(pmd_regs->reg), (((u64)_x) << 32)); \ - shadow_regs->reg = _x; \ - } while (0) - -#define READ_SHADOW_REG(val, reg) \ - do { \ - struct cbe_pmd_shadow_regs *shadow_regs; \ - shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \ - (val) = shadow_regs->reg; \ - } while (0) - -#define READ_MMIO_UPPER32(val, reg) \ - do { \ - struct cbe_pmd_regs __iomem *pmd_regs; \ - pmd_regs = cbe_get_cpu_pmd_regs(cpu); \ - (val) = (u32)(in_be64(&pmd_regs->reg) >> 32); \ - } while (0) - -/* - * Physical counter registers. - * Each physical counter can act as one 32-bit counter or two 16-bit counters. 
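 *
 * Illustration (not part of this file): with physical counter 0 in
 * 16-bit mode, logical counter 0 occupies the upper half and logical
 * counter 4 (0 + NR_PHYS_CTRS) the lower half of the same register,
 * which is how cbe_read_ctr() below splits the value:
 *
 *	u32 phys = cbe_read_phys_ctr(cpu, 0);
 *	u16 ctr0 = phys >> 16;		// logical counter 0
 *	u16 ctr4 = phys & 0xffff;	// logical counter 4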
- */ - -u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr) -{ - u32 val_in_latch, val = 0; - - if (phys_ctr < NR_PHYS_CTRS) { - READ_SHADOW_REG(val_in_latch, counter_value_in_latch); - - /* Read the latch or the actual counter, whichever is newer. */ - if (val_in_latch & (1 << phys_ctr)) { - READ_SHADOW_REG(val, pm_ctr[phys_ctr]); - } else { - READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]); - } - } - - return val; -} -EXPORT_SYMBOL_GPL(cbe_read_phys_ctr); - -void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val) -{ - struct cbe_pmd_shadow_regs *shadow_regs; - u32 pm_ctrl; - - if (phys_ctr < NR_PHYS_CTRS) { - /* Writing to a counter only writes to a hardware latch. - * The new value is not propagated to the actual counter - * until the performance monitor is enabled. - */ - WRITE_WO_MMIO(pm_ctr[phys_ctr], val); - - pm_ctrl = cbe_read_pm(cpu, pm_control); - if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) { - /* The counters are already active, so we need to - * rewrite the pm_control register to "re-enable" - * the PMU. - */ - cbe_write_pm(cpu, pm_control, pm_ctrl); - } else { - shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); - shadow_regs->counter_value_in_latch |= (1 << phys_ctr); - } - } -} -EXPORT_SYMBOL_GPL(cbe_write_phys_ctr); - -/* - * "Logical" counter registers. - * These will read/write 16-bits or 32-bits depending on the - * current size of the counter. Counters 4 - 7 are always 16-bit. - */ - -u32 cbe_read_ctr(u32 cpu, u32 ctr) -{ - u32 val; - u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1); - - val = cbe_read_phys_ctr(cpu, phys_ctr); - - if (cbe_get_ctr_size(cpu, phys_ctr) == 16) - val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff); - - return val; -} -EXPORT_SYMBOL_GPL(cbe_read_ctr); - -void cbe_write_ctr(u32 cpu, u32 ctr, u32 val) -{ - u32 phys_ctr; - u32 phys_val; - - phys_ctr = ctr & (NR_PHYS_CTRS - 1); - - if (cbe_get_ctr_size(cpu, phys_ctr) == 16) { - phys_val = cbe_read_phys_ctr(cpu, phys_ctr); - - if (ctr < NR_PHYS_CTRS) - val = (val << 16) | (phys_val & 0xffff); - else - val = (val & 0xffff) | (phys_val & 0xffff0000); - } - - cbe_write_phys_ctr(cpu, phys_ctr, val); -} -EXPORT_SYMBOL_GPL(cbe_write_ctr); - -/* - * Counter-control registers. - * Each "logical" counter has a corresponding control register. - */ - -u32 cbe_read_pm07_control(u32 cpu, u32 ctr) -{ - u32 pm07_control = 0; - - if (ctr < NR_CTRS) - READ_SHADOW_REG(pm07_control, pm07_control[ctr]); - - return pm07_control; -} -EXPORT_SYMBOL_GPL(cbe_read_pm07_control); - -void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val) -{ - if (ctr < NR_CTRS) - WRITE_WO_MMIO(pm07_control[ctr], val); -} -EXPORT_SYMBOL_GPL(cbe_write_pm07_control); - -/* - * Other PMU control registers. Most of these are write-only. 
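 * Reads of the write-only subset are served from the shadow copies
 * saved by WRITE_WO_MMIO(), so a read-after-write returns the last
 * value written (illustrative):
 *
 *	cbe_write_pm(cpu, group_control, val);	// MMIO write + shadow
 *	x = cbe_read_pm(cpu, group_control);	// x == val, from shadow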
- */ - -u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg) -{ - u32 val = 0; - - switch (reg) { - case group_control: - READ_SHADOW_REG(val, group_control); - break; - - case debug_bus_control: - READ_SHADOW_REG(val, debug_bus_control); - break; - - case trace_address: - READ_MMIO_UPPER32(val, trace_address); - break; - - case ext_tr_timer: - READ_SHADOW_REG(val, ext_tr_timer); - break; - - case pm_status: - READ_MMIO_UPPER32(val, pm_status); - break; - - case pm_control: - READ_SHADOW_REG(val, pm_control); - break; - - case pm_interval: - READ_MMIO_UPPER32(val, pm_interval); - break; - - case pm_start_stop: - READ_SHADOW_REG(val, pm_start_stop); - break; - } - - return val; -} -EXPORT_SYMBOL_GPL(cbe_read_pm); - -void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val) -{ - switch (reg) { - case group_control: - WRITE_WO_MMIO(group_control, val); - break; - - case debug_bus_control: - WRITE_WO_MMIO(debug_bus_control, val); - break; - - case trace_address: - WRITE_WO_MMIO(trace_address, val); - break; - - case ext_tr_timer: - WRITE_WO_MMIO(ext_tr_timer, val); - break; - - case pm_status: - WRITE_WO_MMIO(pm_status, val); - break; - - case pm_control: - WRITE_WO_MMIO(pm_control, val); - break; - - case pm_interval: - WRITE_WO_MMIO(pm_interval, val); - break; - - case pm_start_stop: - WRITE_WO_MMIO(pm_start_stop, val); - break; - } -} -EXPORT_SYMBOL_GPL(cbe_write_pm); - -/* - * Get/set the size of a physical counter to either 16 or 32 bits. - */ - -u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr) -{ - u32 pm_ctrl, size = 0; - - if (phys_ctr < NR_PHYS_CTRS) { - pm_ctrl = cbe_read_pm(cpu, pm_control); - size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32; - } - - return size; -} -EXPORT_SYMBOL_GPL(cbe_get_ctr_size); - -void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size) -{ - u32 pm_ctrl; - - if (phys_ctr < NR_PHYS_CTRS) { - pm_ctrl = cbe_read_pm(cpu, pm_control); - switch (ctr_size) { - case 16: - pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr); - break; - - case 32: - pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr); - break; - } - cbe_write_pm(cpu, pm_control, pm_ctrl); - } -} -EXPORT_SYMBOL_GPL(cbe_set_ctr_size); - -/* - * Enable/disable the entire performance monitoring unit. - * When we enable the PMU, all pending writes to counters get committed. - */ - -void cbe_enable_pm(u32 cpu) -{ - struct cbe_pmd_shadow_regs *shadow_regs; - u32 pm_ctrl; - - shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); - shadow_regs->counter_value_in_latch = 0; - - pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON; - cbe_write_pm(cpu, pm_control, pm_ctrl); -} -EXPORT_SYMBOL_GPL(cbe_enable_pm); - -void cbe_disable_pm(u32 cpu) -{ - u32 pm_ctrl; - pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON; - cbe_write_pm(cpu, pm_control, pm_ctrl); -} -EXPORT_SYMBOL_GPL(cbe_disable_pm); - -/* - * Reading from the trace_buffer. - * The trace buffer is two 64-bit registers. Reading from - * the second half automatically increments the trace_address. - */ - -void cbe_read_trace_buffer(u32 cpu, u64 *buf) -{ - struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu); - - *buf++ = in_be64(&pmd_regs->trace_buffer_0_63); - *buf++ = in_be64(&pmd_regs->trace_buffer_64_127); -} -EXPORT_SYMBOL_GPL(cbe_read_trace_buffer); - -/* - * Enabling/disabling interrupts for the entire performance monitoring unit. - */ - -u32 cbe_get_and_clear_pm_interrupts(u32 cpu) -{ - /* Reading pm_status clears the interrupt bits. 
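 * The read is therefore destructive: callers must keep the returned
 * value, and cbe_disable_pm_interrupts() below drains the bits this
 * way before writing 0 back to pm_status.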
*/ - return cbe_read_pm(cpu, pm_status); -} -EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts); - -void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask) -{ - /* Set which node and thread will handle the next interrupt. */ - iic_set_interrupt_routing(cpu, thread, 0); - - /* Enable the interrupt bits in the pm_status register. */ - if (mask) - cbe_write_pm(cpu, pm_status, mask); -} -EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts); - -void cbe_disable_pm_interrupts(u32 cpu) -{ - cbe_get_and_clear_pm_interrupts(cpu); - cbe_write_pm(cpu, pm_status, 0); -} -EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts); - -static irqreturn_t cbe_pm_irq(int irq, void *dev_id) -{ - perf_irq(get_irq_regs()); - return IRQ_HANDLED; -} - -static int __init cbe_init_pm_irq(void) -{ - unsigned int irq; - int rc, node; - - for_each_online_node(node) { - irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI | - (node << IIC_IRQ_NODE_SHIFT)); - if (!irq) { - printk("ERROR: Unable to allocate irq for node %d\n", - node); - return -EINVAL; - } - - rc = request_irq(irq, cbe_pm_irq, - 0, "cbe-pmu-0", NULL); - if (rc) { - printk("ERROR: Request for irq on node %d failed\n", - node); - return rc; - } - } - - return 0; -} -machine_arch_initcall(cell, cbe_init_pm_irq); - -void cbe_sync_irq(int node) -{ - unsigned int irq; - - irq = irq_find_mapping(NULL, - IIC_IRQ_IOEX_PMI - | (node << IIC_IRQ_NODE_SHIFT)); - - if (!irq) { - printk(KERN_WARNING "ERROR, unable to get existing irq %d " \ - "for node %d\n", irq, node); - return; - } - - synchronize_irq(irq); -} -EXPORT_SYMBOL_GPL(cbe_sync_irq); - diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c deleted file mode 100644 index f6b87926530c..000000000000 --- a/arch/powerpc/platforms/cell/ras.c +++ /dev/null @@ -1,352 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2006-2008, IBM Corporation. 
- */ - -#undef DEBUG - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/smp.h> -#include <linux/reboot.h> -#include <linux/kexec.h> -#include <linux/crash_dump.h> -#include <linux/of.h> - -#include <asm/kexec.h> -#include <asm/reg.h> -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/rtas.h> -#include <asm/cell-regs.h> - -#include "ras.h" -#include "pervasive.h" - -static void dump_fir(int cpu) -{ - struct cbe_pmd_regs __iomem *pregs = cbe_get_cpu_pmd_regs(cpu); - struct cbe_iic_regs __iomem *iregs = cbe_get_cpu_iic_regs(cpu); - - if (pregs == NULL) - return; - - /* Todo: do some nicer parsing of bits and based on them go down - * to other sub-units FIRs and not only IIC - */ - printk(KERN_ERR "Global Checkstop FIR : 0x%016llx\n", - in_be64(&pregs->checkstop_fir)); - printk(KERN_ERR "Global Recoverable FIR : 0x%016llx\n", - in_be64(&pregs->checkstop_fir)); - printk(KERN_ERR "Global MachineCheck FIR : 0x%016llx\n", - in_be64(&pregs->spec_att_mchk_fir)); - - if (iregs == NULL) - return; - printk(KERN_ERR "IOC FIR : 0x%016llx\n", - in_be64(&iregs->ioc_fir)); - -} - -DEFINE_INTERRUPT_HANDLER(cbe_system_error_exception) -{ - int cpu = smp_processor_id(); - - printk(KERN_ERR "System Error Interrupt on CPU %d !\n", cpu); - dump_fir(cpu); - dump_stack(); -} - -DEFINE_INTERRUPT_HANDLER(cbe_maintenance_exception) -{ - int cpu = smp_processor_id(); - - /* - * Nothing implemented for the maintenance interrupt at this point - */ - - printk(KERN_ERR "Unhandled Maintenance interrupt on CPU %d !\n", cpu); - dump_stack(); -} - -DEFINE_INTERRUPT_HANDLER(cbe_thermal_exception) -{ - int cpu = smp_processor_id(); - - /* - * Nothing implemented for the thermal interrupt at this point - */ - - printk(KERN_ERR "Unhandled Thermal interrupt on CPU %d !\n", cpu); - dump_stack(); -} - -static int cbe_machine_check_handler(struct pt_regs *regs) -{ - int cpu = smp_processor_id(); - - printk(KERN_ERR "Machine Check Interrupt on CPU %d !\n", cpu); - dump_fir(cpu); - - /* No recovery from this code now, lets continue */ - return 0; -} - -struct ptcal_area { - struct list_head list; - int nid; - int order; - struct page *pages; -}; - -static LIST_HEAD(ptcal_list); - -static int ptcal_start_tok, ptcal_stop_tok; - -static int __init cbe_ptcal_enable_on_node(int nid, int order) -{ - struct ptcal_area *area; - int ret = -ENOMEM; - unsigned long addr; - - if (is_kdump_kernel()) - rtas_call(ptcal_stop_tok, 1, 1, NULL, nid); - - area = kmalloc(sizeof(*area), GFP_KERNEL); - if (!area) - goto out_err; - - area->nid = nid; - area->order = order; - area->pages = __alloc_pages_node(area->nid, - GFP_KERNEL|__GFP_THISNODE, - area->order); - - if (!area->pages) { - printk(KERN_WARNING "%s: no page on node %d\n", - __func__, area->nid); - goto out_free_area; - } - - /* - * We move the ptcal area to the middle of the allocated - * page, in order to avoid prefetches in memcpy and similar - * functions stepping on it. 
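 * Placing it half a page in leaves slack on both sides, so helpers
 * that read a little past a neighbouring buffer (with 4K pages, up to
 * 2K on either side) still do not touch the PTCAL area.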
- */ - addr = __pa(page_address(area->pages)) + (PAGE_SIZE >> 1); - printk(KERN_DEBUG "%s: enabling PTCAL on node %d address=0x%016lx\n", - __func__, area->nid, addr); - - ret = -EIO; - if (rtas_call(ptcal_start_tok, 3, 1, NULL, area->nid, - (unsigned int)(addr >> 32), - (unsigned int)(addr & 0xffffffff))) { - printk(KERN_ERR "%s: error enabling PTCAL on node %d!\n", - __func__, nid); - goto out_free_pages; - } - - list_add(&area->list, &ptcal_list); - - return 0; - -out_free_pages: - __free_pages(area->pages, area->order); -out_free_area: - kfree(area); -out_err: - return ret; -} - -static int __init cbe_ptcal_enable(void) -{ - const u32 *size; - struct device_node *np; - int order, found_mic = 0; - - np = of_find_node_by_path("/rtas"); - if (!np) - return -ENODEV; - - size = of_get_property(np, "ibm,cbe-ptcal-size", NULL); - if (!size) { - of_node_put(np); - return -ENODEV; - } - - pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size); - order = get_order(*size); - of_node_put(np); - - /* support for malta device trees, with be@/mic@ nodes */ - for_each_node_by_type(np, "mic-tm") { - cbe_ptcal_enable_on_node(of_node_to_nid(np), order); - found_mic = 1; - } - - if (found_mic) - return 0; - - /* support for older device tree - use cpu nodes */ - for_each_node_by_type(np, "cpu") { - const u32 *nid = of_get_property(np, "node-id", NULL); - if (!nid) { - printk(KERN_ERR "%s: node %pOF is missing node-id?\n", - __func__, np); - continue; - } - cbe_ptcal_enable_on_node(*nid, order); - found_mic = 1; - } - - return found_mic ? 0 : -ENODEV; -} - -static int cbe_ptcal_disable(void) -{ - struct ptcal_area *area, *tmp; - int ret = 0; - - pr_debug("%s: disabling PTCAL\n", __func__); - - list_for_each_entry_safe(area, tmp, &ptcal_list, list) { - /* disable ptcal on this node */ - if (rtas_call(ptcal_stop_tok, 1, 1, NULL, area->nid)) { - printk(KERN_ERR "%s: error disabling PTCAL " - "on node %d!\n", __func__, - area->nid); - ret = -EIO; - continue; - } - - /* ensure we can access the PTCAL area */ - memset(page_address(area->pages), 0, - 1 << (area->order + PAGE_SHIFT)); - - /* clean up */ - list_del(&area->list); - __free_pages(area->pages, area->order); - kfree(area); - } - - return ret; -} - -static int cbe_ptcal_notify_reboot(struct notifier_block *nb, - unsigned long code, void *data) -{ - return cbe_ptcal_disable(); -} - -static void cbe_ptcal_crash_shutdown(void) -{ - cbe_ptcal_disable(); -} - -static struct notifier_block cbe_ptcal_reboot_notifier = { - .notifier_call = cbe_ptcal_notify_reboot -}; - -#ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON -static int sysreset_hack; - -static int __init cbe_sysreset_init(void) -{ - struct cbe_pmd_regs __iomem *regs; - - sysreset_hack = of_machine_is_compatible("IBM,CBPLUS-1.0"); - if (!sysreset_hack) - return 0; - - regs = cbe_get_cpu_pmd_regs(0); - if (!regs) - return 0; - - /* Enable JTAG system-reset hack */ - out_be32(®s->fir_mode_reg, - in_be32(®s->fir_mode_reg) | - CBE_PMD_FIR_MODE_M8); - - return 0; -} -device_initcall(cbe_sysreset_init); - -int cbe_sysreset_hack(void) -{ - struct cbe_pmd_regs __iomem *regs; - - /* - * The BMC can inject user triggered system reset exceptions, - * but cannot set the system reset reason in srr1, - * so check an extra register here. 
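 * A non-zero value in the low 16 bits of ras_esc_0 marks the injected
 * reset; it is cleared so the event is seen only once, and 0 is
 * returned so the generic system-reset handling still runs. All other
 * cases return 1, i.e. the exception counts as handled.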
- */ - if (sysreset_hack && (smp_processor_id() == 0)) { - regs = cbe_get_cpu_pmd_regs(0); - if (!regs) - return 0; - if (in_be64(®s->ras_esc_0) & 0x0000ffff) { - out_be64(®s->ras_esc_0, 0); - return 0; - } - } - return 1; -} -#endif /* CONFIG_PPC_IBM_CELL_RESETBUTTON */ - -static int __init cbe_ptcal_init(void) -{ - int ret; - ptcal_start_tok = rtas_function_token(RTAS_FN_IBM_CBE_START_PTCAL); - ptcal_stop_tok = rtas_function_token(RTAS_FN_IBM_CBE_STOP_PTCAL); - - if (ptcal_start_tok == RTAS_UNKNOWN_SERVICE - || ptcal_stop_tok == RTAS_UNKNOWN_SERVICE) - return -ENODEV; - - ret = register_reboot_notifier(&cbe_ptcal_reboot_notifier); - if (ret) - goto out1; - - ret = crash_shutdown_register(&cbe_ptcal_crash_shutdown); - if (ret) - goto out2; - - return cbe_ptcal_enable(); - -out2: - unregister_reboot_notifier(&cbe_ptcal_reboot_notifier); -out1: - printk(KERN_ERR "Can't disable PTCAL, so not enabling\n"); - return ret; -} - -arch_initcall(cbe_ptcal_init); - -void __init cbe_ras_init(void) -{ - unsigned long hid0; - - /* - * Enable System Error & thermal interrupts and wakeup conditions - */ - - hid0 = mfspr(SPRN_HID0); - hid0 |= HID0_CBE_THERM_INT_EN | HID0_CBE_THERM_WAKEUP | - HID0_CBE_SYSERR_INT_EN | HID0_CBE_SYSERR_WAKEUP; - mtspr(SPRN_HID0, hid0); - mb(); - - /* - * Install machine check handler. Leave setting of precise mode to - * what the firmware did for now - */ - ppc_md.machine_check_exception = cbe_machine_check_handler; - mb(); - - /* - * For now, we assume that IOC_FIR is already set to forward some - * error conditions to the System Error handler. If that is not true - * then it will have to be fixed up here. - */ -} diff --git a/arch/powerpc/platforms/cell/ras.h b/arch/powerpc/platforms/cell/ras.h deleted file mode 100644 index 226dbd48efad..000000000000 --- a/arch/powerpc/platforms/cell/ras.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef RAS_H -#define RAS_H - -#include <asm/interrupt.h> - -DECLARE_INTERRUPT_HANDLER(cbe_system_error_exception); -DECLARE_INTERRUPT_HANDLER(cbe_maintenance_exception); -DECLARE_INTERRUPT_HANDLER(cbe_thermal_exception); - -extern void cbe_ras_init(void); - -#endif /* RAS_H */ diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c deleted file mode 100644 index f64a1ef98aa8..000000000000 --- a/arch/powerpc/platforms/cell/setup.c +++ /dev/null @@ -1,274 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * linux/arch/powerpc/platforms/cell/cell_setup.c - * - * Copyright (C) 1995 Linus Torvalds - * Adapted from 'alpha' version by Gary Thomas - * Modified by Cort Dougan (cort@cs.nmt.edu) - * Modified by PPC64 Team, IBM Corp - * Modified by Cell Team, IBM Deutschland Entwicklung GmbH - */ -#undef DEBUG - -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include <linux/export.h> -#include <linux/unistd.h> -#include <linux/user.h> -#include <linux/reboot.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/irq.h> -#include <linux/seq_file.h> -#include <linux/root_dev.h> -#include <linux/console.h> -#include <linux/mutex.h> -#include <linux/memory_hotplug.h> -#include <linux/of_platform.h> -#include <linux/platform_device.h> - -#include <asm/mmu.h> -#include <asm/processor.h> -#include <asm/io.h> -#include <asm/rtas.h> -#include <asm/pci-bridge.h> -#include <asm/iommu.h> -#include <asm/dma.h> -#include <asm/machdep.h> -#include <asm/time.h> -#include <asm/nvram.h> -#include <asm/cputable.h> -#include 
<asm/ppc-pci.h> -#include <asm/irq.h> -#include <asm/spu.h> -#include <asm/spu_priv1.h> -#include <asm/udbg.h> -#include <asm/mpic.h> -#include <asm/cell-regs.h> -#include <asm/io-workarounds.h> - -#include "cell.h" -#include "interrupt.h" -#include "pervasive.h" -#include "ras.h" - -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) -#endif - -static void cell_show_cpuinfo(struct seq_file *m) -{ - struct device_node *root; - const char *model = ""; - - root = of_find_node_by_path("/"); - if (root) - model = of_get_property(root, "model", NULL); - seq_printf(m, "machine\t\t: CHRP %s\n", model); - of_node_put(root); -} - -static void cell_progress(char *s, unsigned short hex) -{ - printk("*** %04x : %s\n", hex, s ? s : ""); -} - -static void cell_fixup_pcie_rootcomplex(struct pci_dev *dev) -{ - struct pci_controller *hose; - const char *s; - int i; - - if (!machine_is(cell)) - return; - - /* We're searching for a direct child of the PHB */ - if (dev->bus->self != NULL || dev->devfn != 0) - return; - - hose = pci_bus_to_host(dev->bus); - if (hose == NULL) - return; - - /* Only on PCIE */ - if (!of_device_is_compatible(hose->dn, "pciex")) - return; - - /* And only on axon */ - s = of_get_property(hose->dn, "model", NULL); - if (!s || strcmp(s, "Axon") != 0) - return; - - for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { - dev->resource[i].start = dev->resource[i].end = 0; - dev->resource[i].flags = 0; - } - - printk(KERN_DEBUG "PCI: Hiding resources on Axon PCIE RC %s\n", - pci_name(dev)); -} -DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, cell_fixup_pcie_rootcomplex); - -static int cell_setup_phb(struct pci_controller *phb) -{ - const char *model; - struct device_node *np; - - int rc = rtas_setup_phb(phb); - if (rc) - return rc; - - phb->controller_ops = cell_pci_controller_ops; - - np = phb->dn; - model = of_get_property(np, "model", NULL); - if (model == NULL || !of_node_name_eq(np, "pci")) - return 0; - - /* Setup workarounds for spider */ - if (strcmp(model, "Spider")) - return 0; - - iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init, - (void *)SPIDER_PCI_REG_BASE); - return 0; -} - -static const struct of_device_id cell_bus_ids[] __initconst = { - { .type = "soc", }, - { .compatible = "soc", }, - { .type = "spider", }, - { .type = "axon", }, - { .type = "plb5", }, - { .type = "plb4", }, - { .type = "opb", }, - { .type = "ebc", }, - {}, -}; - -static int __init cell_publish_devices(void) -{ - struct device_node *root = of_find_node_by_path("/"); - struct device_node *np; - int node; - - /* Publish OF platform devices for southbridge IOs */ - of_platform_bus_probe(NULL, cell_bus_ids, NULL); - - /* On spider based blades, we need to manually create the OF - * platform devices for the PCI host bridges - */ - for_each_child_of_node(root, np) { - if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "pciex")) - continue; - of_platform_device_create(np, NULL, NULL); - } - - of_node_put(root); - - /* There is no device for the MIC memory controller, thus we create - * a platform device for it to attach the EDAC driver to. 
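 * One "cbe-mic" platform device is registered below for each online
 * node whose MIC TM registers are reachable; the EDAC driver then
 * binds to the devices by that name.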
- */ - for_each_online_node(node) { - if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL) - continue; - platform_device_register_simple("cbe-mic", node, NULL, 0); - } - - return 0; -} -machine_subsys_initcall(cell, cell_publish_devices); - -static void __init mpic_init_IRQ(void) -{ - struct device_node *dn; - struct mpic *mpic; - - for_each_node_by_name(dn, "interrupt-controller") { - if (!of_device_is_compatible(dn, "CBEA,platform-open-pic")) - continue; - - /* The MPIC driver will get everything it needs from the - * device-tree, just pass 0 to all arguments - */ - mpic = mpic_alloc(dn, 0, MPIC_SECONDARY | MPIC_NO_RESET, - 0, 0, " MPIC "); - if (mpic == NULL) - continue; - mpic_init(mpic); - } -} - - -static void __init cell_init_irq(void) -{ - iic_init_IRQ(); - spider_init_IRQ(); - mpic_init_IRQ(); -} - -static void __init cell_set_dabrx(void) -{ - mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); -} - -static void __init cell_setup_arch(void) -{ -#ifdef CONFIG_SPU_BASE - spu_priv1_ops = &spu_priv1_mmio_ops; - spu_management_ops = &spu_management_of_ops; -#endif - - cbe_regs_init(); - - cell_set_dabrx(); - -#ifdef CONFIG_CBE_RAS - cbe_ras_init(); -#endif - -#ifdef CONFIG_SMP - smp_init_cell(); -#endif - /* init to some ~sane value until calibrate_delay() runs */ - loops_per_jiffy = 50000000; - - /* Find and initialize PCI host bridges */ - init_pci_config_tokens(); - - cbe_pervasive_init(); - - mmio_nvram_init(); -} - -static int __init cell_probe(void) -{ - if (!of_machine_is_compatible("IBM,CBEA") && - !of_machine_is_compatible("IBM,CPBW-1.0")) - return 0; - - pm_power_off = rtas_power_off; - - return 1; -} - -define_machine(cell) { - .name = "Cell", - .probe = cell_probe, - .setup_arch = cell_setup_arch, - .show_cpuinfo = cell_show_cpuinfo, - .restart = rtas_restart, - .halt = rtas_halt, - .get_boot_time = rtas_get_boot_time, - .get_rtc_time = rtas_get_rtc_time, - .set_rtc_time = rtas_set_rtc_time, - .progress = cell_progress, - .init_IRQ = cell_init_irq, - .pci_setup_phb = cell_setup_phb, -}; - -struct pci_controller_ops cell_pci_controller_ops; diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c deleted file mode 100644 index 30394c6f8894..000000000000 --- a/arch/powerpc/platforms/cell/smp.c +++ /dev/null @@ -1,161 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * SMP support for BPA machines. - * - * Dave Engebretsen, Peter Bergner, and - * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com - * - * Plus various changes from other IBM teams... - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/cache.h> -#include <linux/err.h> -#include <linux/device.h> -#include <linux/cpu.h> -#include <linux/pgtable.h> - -#include <asm/ptrace.h> -#include <linux/atomic.h> -#include <asm/irq.h> -#include <asm/page.h> -#include <asm/io.h> -#include <asm/smp.h> -#include <asm/paca.h> -#include <asm/machdep.h> -#include <asm/cputable.h> -#include <asm/firmware.h> -#include <asm/rtas.h> -#include <asm/cputhreads.h> -#include <asm/code-patching.h> - -#include "interrupt.h" -#include <asm/udbg.h> - -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) -#endif - -/* - * The Primary thread of each non-boot processor was started from the OF client - * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop. 
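 * of_spin_map below records exactly those threads, letting
 * smp_startup_cpu() distinguish CPUs that are already spinning from
 * ones that must be started through the RTAS start-cpu call.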
- */ -static cpumask_t of_spin_map; - -/** - * smp_startup_cpu() - start the given cpu - * - * At boot time, there is nothing to do for primary threads which were - * started from Open Firmware. For anything else, call RTAS with the - * appropriate start location. - * - * Returns: - * 0 - failure - * 1 - success - */ -static inline int smp_startup_cpu(unsigned int lcpu) -{ - int status; - unsigned long start_here = - __pa(ppc_function_entry(generic_secondary_smp_init)); - unsigned int pcpu; - int start_cpu; - - if (cpumask_test_cpu(lcpu, &of_spin_map)) - /* Already started by OF and sitting in spin loop */ - return 1; - - pcpu = get_hard_smp_processor_id(lcpu); - - /* - * If the RTAS start-cpu token does not exist then presume the - * cpu is already spinning. - */ - start_cpu = rtas_function_token(RTAS_FN_START_CPU); - if (start_cpu == RTAS_UNKNOWN_SERVICE) - return 1; - - status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu); - if (status != 0) { - printk(KERN_ERR "start-cpu failed: %i\n", status); - return 0; - } - - return 1; -} - -static void smp_cell_setup_cpu(int cpu) -{ - if (cpu != boot_cpuid) - iic_setup_cpu(); - - /* - * Change the default DABRX to allow user watchpoints. - */ - mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); -} - -static int smp_cell_kick_cpu(int nr) -{ - if (nr < 0 || nr >= nr_cpu_ids) - return -EINVAL; - - if (!smp_startup_cpu(nr)) - return -ENOENT; - - /* - * The processor is currently spinning, waiting for the - * cpu_start field to become non-zero. After we set cpu_start, - * the processor will continue on to secondary_start. - */ - paca_ptrs[nr]->cpu_start = 1; - - return 0; -} - -static struct smp_ops_t bpa_iic_smp_ops = { - .message_pass = iic_message_pass, - .probe = iic_request_IPIs, - .kick_cpu = smp_cell_kick_cpu, - .setup_cpu = smp_cell_setup_cpu, - .cpu_bootable = smp_generic_cpu_bootable, -}; - -/* This is called very early */ -void __init smp_init_cell(void) -{ - int i; - - DBG(" -> smp_init_cell()\n"); - - smp_ops = &bpa_iic_smp_ops; - - /* Mark threads which are still spinning in hold loops. 
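 * With SMT, firmware holds only thread 0 of each core; without it,
 * every present CPU is assumed to be spinning. The boot CPU is then
 * dropped from the map since it is already running.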
*/ - if (cpu_has_feature(CPU_FTR_SMT)) { - for_each_present_cpu(i) { - if (cpu_thread_in_core(i) == 0) - cpumask_set_cpu(i, &of_spin_map); - } - } else - cpumask_copy(&of_spin_map, cpu_present_mask); - - cpumask_clear_cpu(boot_cpuid, &of_spin_map); - - /* Non-lpar has additional take/give timebase */ - if (rtas_function_token(RTAS_FN_FREEZE_TIME_BASE) != RTAS_UNKNOWN_SERVICE) { - smp_ops->give_timebase = rtas_give_timebase; - smp_ops->take_timebase = rtas_take_timebase; - } - - DBG(" <- smp_init_cell()\n"); -} diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c deleted file mode 100644 index 68439445b1c3..000000000000 --- a/arch/powerpc/platforms/cell/spider-pci.c +++ /dev/null @@ -1,170 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * IO workarounds for PCI on Celleb/Cell platform - * - * (C) Copyright 2006-2007 TOSHIBA CORPORATION - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/of_address.h> -#include <linux/slab.h> -#include <linux/io.h> - -#include <asm/ppc-pci.h> -#include <asm/pci-bridge.h> -#include <asm/io-workarounds.h> - -#define SPIDER_PCI_DISABLE_PREFETCH - -struct spiderpci_iowa_private { - void __iomem *regs; -}; - -static void spiderpci_io_flush(struct iowa_bus *bus) -{ - struct spiderpci_iowa_private *priv; - - priv = bus->private; - in_be32(priv->regs + SPIDER_PCI_DUMMY_READ); - iosync(); -} - -#define SPIDER_PCI_MMIO_READ(name, ret) \ -static ret spiderpci_##name(const PCI_IO_ADDR addr) \ -{ \ - ret val = __do_##name(addr); \ - spiderpci_io_flush(iowa_mem_find_bus(addr)); \ - return val; \ -} - -#define SPIDER_PCI_MMIO_READ_STR(name) \ -static void spiderpci_##name(const PCI_IO_ADDR addr, void *buf, \ - unsigned long count) \ -{ \ - __do_##name(addr, buf, count); \ - spiderpci_io_flush(iowa_mem_find_bus(addr)); \ -} - -SPIDER_PCI_MMIO_READ(readb, u8) -SPIDER_PCI_MMIO_READ(readw, u16) -SPIDER_PCI_MMIO_READ(readl, u32) -SPIDER_PCI_MMIO_READ(readq, u64) -SPIDER_PCI_MMIO_READ(readw_be, u16) -SPIDER_PCI_MMIO_READ(readl_be, u32) -SPIDER_PCI_MMIO_READ(readq_be, u64) -SPIDER_PCI_MMIO_READ_STR(readsb) -SPIDER_PCI_MMIO_READ_STR(readsw) -SPIDER_PCI_MMIO_READ_STR(readsl) - -static void spiderpci_memcpy_fromio(void *dest, const PCI_IO_ADDR src, - unsigned long n) -{ - __do_memcpy_fromio(dest, src, n); - spiderpci_io_flush(iowa_mem_find_bus(src)); -} - -static int __init spiderpci_pci_setup_chip(struct pci_controller *phb, - void __iomem *regs) -{ - void *dummy_page_va; - dma_addr_t dummy_page_da; - -#ifdef SPIDER_PCI_DISABLE_PREFETCH - u32 val = in_be32(regs + SPIDER_PCI_VCI_CNTL_STAT); - pr_debug("SPIDER_IOWA:PVCI_Control_Status was 0x%08x\n", val); - out_be32(regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8); -#endif /* SPIDER_PCI_DISABLE_PREFETCH */ - - /* set up the dummy read */ - /* - * On CellBlade we cannot tell which XDR memory kmalloc() will use - * for dummy_page_va. For best performance the page should come from - * the XDR nearest this spider-pci, which would mean selecting the - * closest CBE and allocating from its XDR, but there is no obvious - * way to do that. - * - * Celleb does not have this problem, because it has only one XDR. 
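 *
 * The page allocated below backs SPIDER_PCI_DUMMY_READ_BASE: every
 * MMIO read is chased by spiderpci_io_flush() reading from this DMA
 * address, which forces outstanding posted traffic to complete.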
- */ - dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!dummy_page_va) { - pr_err("SPIDERPCI-IOWA:Alloc dummy_page_va failed.\n"); - return -1; - } - - dummy_page_da = dma_map_single(phb->parent, dummy_page_va, - PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(phb->parent, dummy_page_da)) { - pr_err("SPIDER-IOWA:Map dummy page failed.\n"); - kfree(dummy_page_va); - return -1; - } - - out_be32(regs + SPIDER_PCI_DUMMY_READ_BASE, dummy_page_da); - - return 0; -} - -int __init spiderpci_iowa_init(struct iowa_bus *bus, void *data) -{ - void __iomem *regs = NULL; - struct spiderpci_iowa_private *priv; - struct device_node *np = bus->phb->dn; - struct resource r; - unsigned long offset = (unsigned long)data; - - pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%pOF)\n", - np); - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - pr_err("SPIDERPCI-IOWA:" - "Can't allocate struct spiderpci_iowa_private.\n"); - return -1; - } - - if (of_address_to_resource(np, 0, &r)) { - pr_err("SPIDERPCI-IOWA:Can't get resource.\n"); - goto error; - } - - regs = ioremap(r.start + offset, SPIDER_PCI_REG_SIZE); - if (!regs) { - pr_err("SPIDERPCI-IOWA:ioremap failed.\n"); - goto error; - } - priv->regs = regs; - bus->private = priv; - - if (spiderpci_pci_setup_chip(bus->phb, regs)) - goto error; - - return 0; - -error: - kfree(priv); - bus->private = NULL; - - if (regs) - iounmap(regs); - - return -1; -} - -struct ppc_pci_io spiderpci_ops = { - .readb = spiderpci_readb, - .readw = spiderpci_readw, - .readl = spiderpci_readl, - .readq = spiderpci_readq, - .readw_be = spiderpci_readw_be, - .readl_be = spiderpci_readl_be, - .readq_be = spiderpci_readq_be, - .readsb = spiderpci_readsb, - .readsw = spiderpci_readsw, - .readsl = spiderpci_readsl, - .memcpy_fromio = spiderpci_memcpy_fromio, -}; - diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c deleted file mode 100644 index 11df737c8c6a..000000000000 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ /dev/null @@ -1,344 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * External Interrupt Controller on Spider South Bridge - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * Author: Arnd Bergmann <arndb@de.ibm.com> - */ - -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/ioport.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/pgtable.h> - -#include <asm/io.h> - -#include "interrupt.h" - -/* register layout taken from Spider spec, table 7.4-4 */ -enum { - TIR_DEN = 0x004, /* Detection Enable Register */ - TIR_MSK = 0x084, /* Mask Level Register */ - TIR_EDC = 0x0c0, /* Edge Detection Clear Register */ - TIR_PNDA = 0x100, /* Pending Register A */ - TIR_PNDB = 0x104, /* Pending Register B */ - TIR_CS = 0x144, /* Current Status Register */ - TIR_LCSA = 0x150, /* Level Current Status Register A */ - TIR_LCSB = 0x154, /* Level Current Status Register B */ - TIR_LCSC = 0x158, /* Level Current Status Register C */ - TIR_LCSD = 0x15c, /* Level Current Status Register D */ - TIR_CFGA = 0x200, /* Setting Register A0 */ - TIR_CFGB = 0x204, /* Setting Register B0 */ - /* 0x208 ... 
0x3ff Setting Register An/Bn */ - TIR_PPNDA = 0x400, /* Packet Pending Register A */ - TIR_PPNDB = 0x404, /* Packet Pending Register B */ - TIR_PIERA = 0x408, /* Packet Output Error Register A */ - TIR_PIERB = 0x40c, /* Packet Output Error Register B */ - TIR_PIEN = 0x444, /* Packet Output Enable Register */ - TIR_PIPND = 0x454, /* Packet Output Pending Register */ - TIRDID = 0x484, /* Spider Device ID Register */ - REISTIM = 0x500, /* Reissue Command Timeout Time Setting */ - REISTIMEN = 0x504, /* Reissue Command Timeout Setting */ - REISWAITEN = 0x508, /* Reissue Wait Control*/ -}; - -#define SPIDER_CHIP_COUNT 4 -#define SPIDER_SRC_COUNT 64 -#define SPIDER_IRQ_INVALID 63 - -struct spider_pic { - struct irq_domain *host; - void __iomem *regs; - unsigned int node_id; -}; -static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; - -static struct spider_pic *spider_irq_data_to_pic(struct irq_data *d) -{ - return irq_data_get_irq_chip_data(d); -} - -static void __iomem *spider_get_irq_config(struct spider_pic *pic, - unsigned int src) -{ - return pic->regs + TIR_CFGA + 8 * src; -} - -static void spider_unmask_irq(struct irq_data *d) -{ - struct spider_pic *pic = spider_irq_data_to_pic(d); - void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); - - out_be32(cfg, in_be32(cfg) | 0x30000000u); -} - -static void spider_mask_irq(struct irq_data *d) -{ - struct spider_pic *pic = spider_irq_data_to_pic(d); - void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); - - out_be32(cfg, in_be32(cfg) & ~0x30000000u); -} - -static void spider_ack_irq(struct irq_data *d) -{ - struct spider_pic *pic = spider_irq_data_to_pic(d); - unsigned int src = irqd_to_hwirq(d); - - /* Reset edge detection logic if necessary - */ - if (irqd_is_level_type(d)) - return; - - /* Only interrupts 47 to 50 can be set to edge */ - if (src < 47 || src > 50) - return; - - /* Perform the clear of the edge logic */ - out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); -} - -static int spider_set_irq_type(struct irq_data *d, unsigned int type) -{ - unsigned int sense = type & IRQ_TYPE_SENSE_MASK; - struct spider_pic *pic = spider_irq_data_to_pic(d); - unsigned int hw = irqd_to_hwirq(d); - void __iomem *cfg = spider_get_irq_config(pic, hw); - u32 old_mask; - u32 ic; - - /* Note that only level high is supported for most interrupts */ - if (sense != IRQ_TYPE_NONE && sense != IRQ_TYPE_LEVEL_HIGH && - (hw < 47 || hw > 50)) - return -EINVAL; - - /* Decode sense type */ - switch(sense) { - case IRQ_TYPE_EDGE_RISING: - ic = 0x3; - break; - case IRQ_TYPE_EDGE_FALLING: - ic = 0x2; - break; - case IRQ_TYPE_LEVEL_LOW: - ic = 0x0; - break; - case IRQ_TYPE_LEVEL_HIGH: - case IRQ_TYPE_NONE: - ic = 0x1; - break; - default: - return -EINVAL; - } - - /* Configure the source. One gross hack that was there before and - * that I've kept around is the priority to the BE which I set to - * be the same as the interrupt source number. I don't know whether - * that's supposed to make any kind of sense however, we'll have to - * decide that, but for now, I'm not changing the behaviour. 
- */ - old_mask = in_be32(cfg) & 0x30000000u; - out_be32(cfg, old_mask | (ic << 24) | (0x7 << 16) | - (pic->node_id << 4) | 0xe); - out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff)); - - return 0; -} - -static struct irq_chip spider_pic = { - .name = "SPIDER", - .irq_unmask = spider_unmask_irq, - .irq_mask = spider_mask_irq, - .irq_ack = spider_ack_irq, - .irq_set_type = spider_set_irq_type, -}; - -static int spider_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - irq_set_chip_data(virq, h->host_data); - irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq); - - /* Set default irq type */ - irq_set_irq_type(virq, IRQ_TYPE_NONE); - - return 0; -} - -static int spider_host_xlate(struct irq_domain *h, struct device_node *ct, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_flags) - -{ - /* Spider interrupts have 2 cells, first is the interrupt source, - * second, well, I don't know for sure yet ... We mask the top bits - * because old device-trees encode a node number in there - */ - *out_hwirq = intspec[0] & 0x3f; - *out_flags = IRQ_TYPE_LEVEL_HIGH; - return 0; -} - -static const struct irq_domain_ops spider_host_ops = { - .map = spider_host_map, - .xlate = spider_host_xlate, -}; - -static void spider_irq_cascade(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct spider_pic *pic = irq_desc_get_handler_data(desc); - unsigned int cs; - - cs = in_be32(pic->regs + TIR_CS) >> 24; - if (cs != SPIDER_IRQ_INVALID) - generic_handle_domain_irq(pic->host, cs); - - chip->irq_eoi(&desc->irq_data); -} - -/* For hooking up the cascade we have a problem. Our device-tree is - * crap and we don't know on which BE iic interrupt we are hooked, at - * least not the "standard" way. We can reconstitute it from two pieces - * of information though: which BE node we are connected to and whether - * we are connected to IOIF0 or IOIF1. Right now, we really only care - * about the IBM cell blade and we know that its firmware gives us an - * interrupt-map property which is pretty strange. - */ -static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) -{ - unsigned int virq; - const u32 *imap, *tmp; - int imaplen, intsize, unit; - struct device_node *iic; - struct device_node *of_node; - - of_node = irq_domain_get_of_node(pic->host); - - /* First, we check whether we have a real "interrupts" in the device - * tree in case the device-tree is ever fixed - */ - virq = irq_of_parse_and_map(of_node, 0); - if (virq) - return virq; - - /* Now do the horrible hacks */ - tmp = of_get_property(of_node, "#interrupt-cells", NULL); - if (tmp == NULL) - return 0; - intsize = *tmp; - imap = of_get_property(of_node, "interrupt-map", &imaplen); - if (imap == NULL || imaplen < (intsize + 1)) - return 0; - iic = of_find_node_by_phandle(imap[intsize]); - if (iic == NULL) - return 0; - imap += intsize + 1; - tmp = of_get_property(iic, "#interrupt-cells", NULL); - if (tmp == NULL) { - of_node_put(iic); - return 0; - } - intsize = *tmp; - /* Assume unit is last entry of interrupt specifier */ - unit = imap[intsize - 1]; - /* Ok, we have a unit, now let's try to get the node */ - tmp = of_get_property(iic, "ibm,interrupt-server-ranges", NULL); - if (tmp == NULL) { - of_node_put(iic); - return 0; - } - /* ugly as hell but works for now */ - pic->node_id = (*tmp) >> 1; - of_node_put(iic); - - /* Ok, now let's get cracking.
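
As a concrete illustration of the register write above (values worked out by hand from the deleted code, not taken from the Spider spec): configuring hwirq 48 as IRQ_TYPE_EDGE_RISING (ic = 0x3) on node 0, with both mask bits still set, composes

/*
 *   old_mask | (ic << 24) | (0x7 << 16) | (node_id << 4) | 0xe
 * = 0x30000000 | 0x03000000 | 0x00070000 | 0x0 | 0xe
 * = 0x3307000e
 *
 * and the second word, (0x2 << 16) | (hw & 0xff) = 0x00020030,
 * routes source 0x30 (decimal 48).
 */
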
You may ask me why I just didn't match - * the iic host from the iic OF node, but that way I'm still compatible - * with really, really old firmwares for which we don't have a node - */ - /* Manufacture an IIC interrupt number of class 2 */ - virq = irq_create_mapping(NULL, - (pic->node_id << IIC_IRQ_NODE_SHIFT) | - (2 << IIC_IRQ_CLASS_SHIFT) | - unit); - if (!virq) - printk(KERN_ERR "spider_pic: failed to map cascade!\n"); - return virq; -} - - -static void __init spider_init_one(struct device_node *of_node, int chip, - unsigned long addr) -{ - struct spider_pic *pic = &spider_pics[chip]; - int i, virq; - - /* Map registers */ - pic->regs = ioremap(addr, 0x1000); - if (pic->regs == NULL) - panic("spider_pic: can't map registers!"); - - /* Allocate a host */ - pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT, - &spider_host_ops, pic); - if (pic->host == NULL) - panic("spider_pic: can't allocate irq host!"); - - /* Go through all sources and disable them */ - for (i = 0; i < SPIDER_SRC_COUNT; i++) { - void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i; - out_be32(cfg, in_be32(cfg) & ~0x30000000u); - } - - /* do not mask any interrupts because of level */ - out_be32(pic->regs + TIR_MSK, 0x0); - - /* enable interrupt packets to be output */ - out_be32(pic->regs + TIR_PIEN, in_be32(pic->regs + TIR_PIEN) | 0x1); - - /* Hook up the cascade interrupt to the iic and nodeid */ - virq = spider_find_cascade_and_node(pic); - if (!virq) - return; - irq_set_handler_data(virq, pic); - irq_set_chained_handler(virq, spider_irq_cascade); - - printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %pOF\n", - pic->node_id, addr, of_node); - - /* Enable the interrupt detection enable bit. Do this last! */ - out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1); -} - -void __init spider_init_IRQ(void) -{ - struct resource r; - struct device_node *dn; - int chip = 0; - - /* XXX node numbers are totally bogus. We _hope_ we get the device - * nodes in the right order here but that's definitely not guaranteed, - * we need to get the node from the device tree instead.
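
The manufactured cascade number above is worth unpacking: passing NULL to irq_create_mapping() resolves against the default interrupt domain, which the Cell IIC driver registers, and the hwirq packs node, class and unit together. A hypothetical helper showing the composition (the shift constants come from the cell "interrupt.h" and are not reproduced here):

/* Illustrative only: how the cascade hwirq above is composed. */
static unsigned int spider_cascade_hwirq(unsigned int node_id, unsigned int unit)
{
	return (node_id << IIC_IRQ_NODE_SHIFT) |	/* BE node */
	       (2 << IIC_IRQ_CLASS_SHIFT) |		/* IIC class 2 */
	       unit;					/* from interrupt-map */
}
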
- * There is currently no proper property for it (but our whole - * device-tree is bogus anyway) so all we can do is pray or maybe test - * the address and deduce the node-id - */ - for_each_node_by_name(dn, "interrupt-controller") { - if (of_device_is_compatible(dn, "CBEA,platform-spider-pic")) { - if (of_address_to_resource(dn, 0, &r)) { - printk(KERN_WARNING "spider-pic: Failed\n"); - continue; - } - } else if (of_device_is_compatible(dn, "sti,platform-spider-pic") - && (chip < 2)) { - static long hard_coded_pics[] = - { 0x24000008000ul, 0x34000008000ul}; - r.start = hard_coded_pics[chip]; - } else - continue; - spider_init_one(dn, chip++, r.start); - } -} diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index dea6f0f25897..2c07387201d0 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -23,7 +23,6 @@ #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/spu_csa.h> -#include <asm/xmon.h> #include <asm/kexec.h> const struct spu_management_ops *spu_management_ops; @@ -772,7 +771,6 @@ static int __init init_spu_base(void) fb_append_extra_logo(&logo_spe_clut224, ret); mutex_lock(&spu_full_list_mutex); - xmon_register_spus(&spu_full_list); crash_register_spus(&spu_full_list); mutex_unlock(&spu_full_list_mutex); spu_add_dev_attr(&dev_attr_stat); diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c deleted file mode 100644 index f464a1f2e568..000000000000 --- a/arch/powerpc/platforms/cell/spu_manage.c +++ /dev/null @@ -1,530 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * spu management operations for of based platforms - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * Copyright 2006 Sony Corp. 
- * (C) Copyright 2007 TOSHIBA CORPORATION - */ - -#include <linux/interrupt.h> -#include <linux/list.h> -#include <linux/export.h> -#include <linux/ptrace.h> -#include <linux/wait.h> -#include <linux/mm.h> -#include <linux/io.h> -#include <linux/mutex.h> -#include <linux/device.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> - -#include <asm/spu.h> -#include <asm/spu_priv1.h> -#include <asm/firmware.h> - -#include "spufs/spufs.h" -#include "interrupt.h" -#include "spu_priv1_mmio.h" - -struct device_node *spu_devnode(struct spu *spu) -{ - return spu->devnode; -} - -EXPORT_SYMBOL_GPL(spu_devnode); - -static u64 __init find_spu_unit_number(struct device_node *spe) -{ - const unsigned int *prop; - int proplen; - - /* new device trees should provide the physical-id attribute */ - prop = of_get_property(spe, "physical-id", &proplen); - if (proplen == 4) - return (u64)*prop; - - /* celleb device tree provides the unit-id */ - prop = of_get_property(spe, "unit-id", &proplen); - if (proplen == 4) - return (u64)*prop; - - /* legacy device trees provide the id in the reg attribute */ - prop = of_get_property(spe, "reg", &proplen); - if (proplen == 4) - return (u64)*prop; - - return 0; -} - -static void spu_unmap(struct spu *spu) -{ - if (!firmware_has_feature(FW_FEATURE_LPAR)) - iounmap(spu->priv1); - iounmap(spu->priv2); - iounmap(spu->problem); - iounmap((__force u8 __iomem *)spu->local_store); -} - -static int __init spu_map_interrupts_old(struct spu *spu, - struct device_node *np) -{ - unsigned int isrc; - const u32 *tmp; - int nid; - - /* Get the interrupt source unit from the device-tree */ - tmp = of_get_property(np, "isrc", NULL); - if (!tmp) - return -ENODEV; - isrc = tmp[0]; - - tmp = of_get_property(np->parent->parent, "node-id", NULL); - if (!tmp) { - printk(KERN_WARNING "%s: can't find node-id\n", __func__); - nid = spu->node; - } else - nid = tmp[0]; - - /* Add the node number */ - isrc |= nid << IIC_IRQ_NODE_SHIFT; - - /* Now map interrupts of all 3 classes */ - spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc); - spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc); - spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc); - - /* Right now, we only fail if class 2 failed */ - if (!spu->irqs[2]) - return -EINVAL; - - return 0; -} - -static void __iomem * __init spu_map_prop_old(struct spu *spu, - struct device_node *n, - const char *name) -{ - const struct address_prop { - unsigned long address; - unsigned int len; - } __attribute__((packed)) *prop; - int proplen; - - prop = of_get_property(n, name, &proplen); - if (prop == NULL || proplen != sizeof (struct address_prop)) - return NULL; - - return ioremap(prop->address, prop->len); -} - -static int __init spu_map_device_old(struct spu *spu) -{ - struct device_node *node = spu->devnode; - const char *prop; - int ret; - - ret = -ENODEV; - spu->name = of_get_property(node, "name", NULL); - if (!spu->name) - goto out; - - prop = of_get_property(node, "local-store", NULL); - if (!prop) - goto out; - spu->local_store_phys = *(unsigned long *)prop; - - /* we use local store as ram, not io memory */ - spu->local_store = (void __force *) - spu_map_prop_old(spu, node, "local-store"); - if (!spu->local_store) - goto out; - - prop = of_get_property(node, "problem", NULL); - if (!prop) - goto out_unmap; - spu->problem_phys = *(unsigned long *)prop; - - spu->problem = spu_map_prop_old(spu, node, "problem"); - if (!spu->problem) - goto out_unmap; - - spu->priv2 = spu_map_prop_old(spu, node, "priv2"); - 
if (!spu->priv2) - goto out_unmap; - - if (!firmware_has_feature(FW_FEATURE_LPAR)) { - spu->priv1 = spu_map_prop_old(spu, node, "priv1"); - if (!spu->priv1) - goto out_unmap; - } - - ret = 0; - goto out; - -out_unmap: - spu_unmap(spu); -out: - return ret; -} - -static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) -{ - int i; - - for (i=0; i < 3; i++) { - spu->irqs[i] = irq_of_parse_and_map(np, i); - if (!spu->irqs[i]) - goto err; - } - return 0; - -err: - pr_debug("failed to map irq %x for spu %s\n", i, spu->name); - for (; i >= 0; i--) { - if (spu->irqs[i]) - irq_dispose_mapping(spu->irqs[i]); - } - return -EINVAL; -} - -static int __init spu_map_resource(struct spu *spu, int nr, - void __iomem** virt, unsigned long *phys) -{ - struct device_node *np = spu->devnode; - struct resource resource = { }; - unsigned long len; - int ret; - - ret = of_address_to_resource(np, nr, &resource); - if (ret) - return ret; - if (phys) - *phys = resource.start; - len = resource_size(&resource); - *virt = ioremap(resource.start, len); - if (!*virt) - return -EINVAL; - return 0; -} - -static int __init spu_map_device(struct spu *spu) -{ - struct device_node *np = spu->devnode; - int ret = -ENODEV; - - spu->name = of_get_property(np, "name", NULL); - if (!spu->name) - goto out; - - ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store, - &spu->local_store_phys); - if (ret) { - pr_debug("spu_new: failed to map %pOF resource 0\n", - np); - goto out; - } - ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem, - &spu->problem_phys); - if (ret) { - pr_debug("spu_new: failed to map %pOF resource 1\n", - np); - goto out_unmap; - } - ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL); - if (ret) { - pr_debug("spu_new: failed to map %pOF resource 2\n", - np); - goto out_unmap; - } - if (!firmware_has_feature(FW_FEATURE_LPAR)) - ret = spu_map_resource(spu, 3, - (void __iomem**)&spu->priv1, NULL); - if (ret) { - pr_debug("spu_new: failed to map %pOF resource 3\n", - np); - goto out_unmap; - } - pr_debug("spu_new: %pOF maps:\n", np); - pr_debug(" local store : 0x%016lx -> 0x%p\n", - spu->local_store_phys, spu->local_store); - pr_debug(" problem state : 0x%016lx -> 0x%p\n", - spu->problem_phys, spu->problem); - pr_debug(" priv2 : 0x%p\n", spu->priv2); - pr_debug(" priv1 : 0x%p\n", spu->priv1); - - return 0; - -out_unmap: - spu_unmap(spu); -out: - pr_debug("failed to map spe %s: %d\n", spu->name, ret); - return ret; -} - -static int __init of_enumerate_spus(int (*fn)(void *data)) -{ - int ret; - struct device_node *node; - unsigned int n = 0; - - ret = -ENODEV; - for_each_node_by_type(node, "spe") { - ret = fn(node); - if (ret) { - printk(KERN_WARNING "%s: Error initializing %pOFn\n", - __func__, node); - of_node_put(node); - break; - } - n++; - } - return ret ? 
ret : n; -} - -static int __init of_create_spu(struct spu *spu, void *data) -{ - int ret; - struct device_node *spe = (struct device_node *)data; - static int legacy_map = 0, legacy_irq = 0; - - spu->devnode = of_node_get(spe); - spu->spe_id = find_spu_unit_number(spe); - - spu->node = of_node_to_nid(spe); - if (spu->node >= MAX_NUMNODES) { - printk(KERN_WARNING "SPE %pOF on node %d ignored," - " node number too big\n", spe, spu->node); - printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n"); - ret = -ENODEV; - goto out; - } - - ret = spu_map_device(spu); - if (ret) { - if (!legacy_map) { - legacy_map = 1; - printk(KERN_WARNING "%s: Legacy device tree found, " - "trying to map old style\n", __func__); - } - ret = spu_map_device_old(spu); - if (ret) { - printk(KERN_ERR "Unable to map %s\n", - spu->name); - goto out; - } - } - - ret = spu_map_interrupts(spu, spe); - if (ret) { - if (!legacy_irq) { - legacy_irq = 1; - printk(KERN_WARNING "%s: Legacy device tree found, " - "trying old style irq\n", __func__); - } - ret = spu_map_interrupts_old(spu, spe); - if (ret) { - printk(KERN_ERR "%s: could not map interrupts\n", - spu->name); - goto out_unmap; - } - } - - pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name, - spu->local_store, spu->problem, spu->priv1, - spu->priv2, spu->number); - goto out; - -out_unmap: - spu_unmap(spu); -out: - return ret; -} - -static int of_destroy_spu(struct spu *spu) -{ - spu_unmap(spu); - of_node_put(spu->devnode); - return 0; -} - -static void enable_spu_by_master_run(struct spu_context *ctx) -{ - ctx->ops->master_start(ctx); -} - -static void disable_spu_by_master_run(struct spu_context *ctx) -{ - ctx->ops->master_stop(ctx); -} - -/* Hardcoded affinity idxs for qs20 */ -#define QS20_SPES_PER_BE 8 -static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 }; -static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 }; - -static struct spu *__init spu_lookup_reg(int node, u32 reg) -{ - struct spu *spu; - const u32 *spu_reg; - - list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { - spu_reg = of_get_property(spu_devnode(spu), "reg", NULL); - if (*spu_reg == reg) - return spu; - } - return NULL; -} - -static void __init init_affinity_qs20_harcoded(void) -{ - int node, i; - struct spu *last_spu, *spu; - u32 reg; - - for (node = 0; node < MAX_NUMNODES; node++) { - last_spu = NULL; - for (i = 0; i < QS20_SPES_PER_BE; i++) { - reg = qs20_reg_idxs[i]; - spu = spu_lookup_reg(node, reg); - if (!spu) - continue; - spu->has_mem_affinity = qs20_reg_memory[reg]; - if (last_spu) - list_add_tail(&spu->aff_list, - &last_spu->aff_list); - last_spu = spu; - } - } -} - -static int __init of_has_vicinity(void) -{ - struct device_node *dn; - - for_each_node_by_type(dn, "spe") { - if (of_property_present(dn, "vicinity")) { - of_node_put(dn); - return 1; - } - } - return 0; -} - -static struct spu *__init devnode_spu(int cbe, struct device_node *dn) -{ - struct spu *spu; - - list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) - if (spu_devnode(spu) == dn) - return spu; - return NULL; -} - -static struct spu * __init -neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid) -{ - struct spu *spu; - struct device_node *spu_dn; - const phandle *vic_handles; - int lenp, i; - - list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) { - spu_dn = spu_devnode(spu); - if (spu_dn == avoid) - continue; - vic_handles = of_get_property(spu_dn, "vicinity", &lenp); - for (i=0; i < (lenp / sizeof(phandle)); i++) { - if 
(vic_handles[i] == target->phandle) - return spu; - } - } - return NULL; -} - -static void __init init_affinity_node(int cbe) -{ - struct spu *spu, *last_spu; - struct device_node *vic_dn, *last_spu_dn; - phandle avoid_ph; - const phandle *vic_handles; - int lenp, i, added; - - last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu, - cbe_list); - avoid_ph = 0; - for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) { - last_spu_dn = spu_devnode(last_spu); - vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp); - - /* - * Walk through each phandle in vicinity property of the spu - * (typically two vicinity phandles per spe node) - */ - for (i = 0; i < (lenp / sizeof(phandle)); i++) { - if (vic_handles[i] == avoid_ph) - continue; - - vic_dn = of_find_node_by_phandle(vic_handles[i]); - if (!vic_dn) - continue; - - if (of_node_name_eq(vic_dn, "spe") ) { - spu = devnode_spu(cbe, vic_dn); - avoid_ph = last_spu_dn->phandle; - } else { - /* - * "mic-tm" and "bif0" nodes do not have - * vicinity property. So we need to find the - * spe which has vic_dn as neighbour, but - * skipping the one we came from (last_spu_dn) - */ - spu = neighbour_spu(cbe, vic_dn, last_spu_dn); - if (!spu) - continue; - if (of_node_name_eq(vic_dn, "mic-tm")) { - last_spu->has_mem_affinity = 1; - spu->has_mem_affinity = 1; - } - avoid_ph = vic_dn->phandle; - } - - of_node_put(vic_dn); - - list_add_tail(&spu->aff_list, &last_spu->aff_list); - last_spu = spu; - break; - } - } -} - -static void __init init_affinity_fw(void) -{ - int cbe; - - for (cbe = 0; cbe < MAX_NUMNODES; cbe++) - init_affinity_node(cbe); -} - -static int __init init_affinity(void) -{ - if (of_has_vicinity()) { - init_affinity_fw(); - } else { - if (of_machine_is_compatible("IBM,CPBW-1.0")) - init_affinity_qs20_harcoded(); - else - printk("No affinity configuration found\n"); - } - - return 0; -} - -const struct spu_management_ops spu_management_of_ops = { - .enumerate_spus = of_enumerate_spus, - .create_spu = of_create_spu, - .destroy_spu = of_destroy_spu, - .enable_spu = enable_spu_by_master_run, - .disable_spu = disable_spu_by_master_run, - .init_affinity = init_affinity, -}; diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c deleted file mode 100644 index d150e3987304..000000000000 --- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c +++ /dev/null @@ -1,167 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * spu hypervisor abstraction for direct hardware access. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * Copyright 2006 Sony Corp. 
- */ - -#include <linux/interrupt.h> -#include <linux/list.h> -#include <linux/ptrace.h> -#include <linux/wait.h> -#include <linux/mm.h> -#include <linux/io.h> -#include <linux/mutex.h> -#include <linux/device.h> -#include <linux/sched.h> - -#include <asm/spu.h> -#include <asm/spu_priv1.h> -#include <asm/firmware.h> - -#include "interrupt.h" -#include "spu_priv1_mmio.h" - -static void int_mask_and(struct spu *spu, int class, u64 mask) -{ - u64 old_mask; - - old_mask = in_be64(&spu->priv1->int_mask_RW[class]); - out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask); -} - -static void int_mask_or(struct spu *spu, int class, u64 mask) -{ - u64 old_mask; - - old_mask = in_be64(&spu->priv1->int_mask_RW[class]); - out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask); -} - -static void int_mask_set(struct spu *spu, int class, u64 mask) -{ - out_be64(&spu->priv1->int_mask_RW[class], mask); -} - -static u64 int_mask_get(struct spu *spu, int class) -{ - return in_be64(&spu->priv1->int_mask_RW[class]); -} - -static void int_stat_clear(struct spu *spu, int class, u64 stat) -{ - out_be64(&spu->priv1->int_stat_RW[class], stat); -} - -static u64 int_stat_get(struct spu *spu, int class) -{ - return in_be64(&spu->priv1->int_stat_RW[class]); -} - -static void cpu_affinity_set(struct spu *spu, int cpu) -{ - u64 target; - u64 route; - - if (nr_cpus_node(spu->node)) { - const struct cpumask *spumask = cpumask_of_node(spu->node), - *cpumask = cpumask_of_node(cpu_to_node(cpu)); - - if (!cpumask_intersects(spumask, cpumask)) - return; - } - - target = iic_get_target_id(cpu); - route = target << 48 | target << 32 | target << 16; - out_be64(&spu->priv1->int_route_RW, route); -} - -static u64 mfc_dar_get(struct spu *spu) -{ - return in_be64(&spu->priv1->mfc_dar_RW); -} - -static u64 mfc_dsisr_get(struct spu *spu) -{ - return in_be64(&spu->priv1->mfc_dsisr_RW); -} - -static void mfc_dsisr_set(struct spu *spu, u64 dsisr) -{ - out_be64(&spu->priv1->mfc_dsisr_RW, dsisr); -} - -static void mfc_sdr_setup(struct spu *spu) -{ - out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1)); -} - -static void mfc_sr1_set(struct spu *spu, u64 sr1) -{ - out_be64(&spu->priv1->mfc_sr1_RW, sr1); -} - -static u64 mfc_sr1_get(struct spu *spu) -{ - return in_be64(&spu->priv1->mfc_sr1_RW); -} - -static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id) -{ - out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id); -} - -static u64 mfc_tclass_id_get(struct spu *spu) -{ - return in_be64(&spu->priv1->mfc_tclass_id_RW); -} - -static void tlb_invalidate(struct spu *spu) -{ - out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul); -} - -static void resource_allocation_groupID_set(struct spu *spu, u64 id) -{ - out_be64(&spu->priv1->resource_allocation_groupID_RW, id); -} - -static u64 resource_allocation_groupID_get(struct spu *spu) -{ - return in_be64(&spu->priv1->resource_allocation_groupID_RW); -} - -static void resource_allocation_enable_set(struct spu *spu, u64 enable) -{ - out_be64(&spu->priv1->resource_allocation_enable_RW, enable); -} - -static u64 resource_allocation_enable_get(struct spu *spu) -{ - return in_be64(&spu->priv1->resource_allocation_enable_RW); -} - -const struct spu_priv1_ops spu_priv1_mmio_ops = -{ - .int_mask_and = int_mask_and, - .int_mask_or = int_mask_or, - .int_mask_set = int_mask_set, - .int_mask_get = int_mask_get, - .int_stat_clear = int_stat_clear, - .int_stat_get = int_stat_get, - .cpu_affinity_set = cpu_affinity_set, - .mfc_dar_get = mfc_dar_get, - .mfc_dsisr_get = mfc_dsisr_get, - .mfc_dsisr_set = 
mfc_dsisr_set, - .mfc_sdr_setup = mfc_sdr_setup, - .mfc_sr1_set = mfc_sr1_set, - .mfc_sr1_get = mfc_sr1_get, - .mfc_tclass_id_set = mfc_tclass_id_set, - .mfc_tclass_id_get = mfc_tclass_id_get, - .tlb_invalidate = tlb_invalidate, - .resource_allocation_groupID_set = resource_allocation_groupID_set, - .resource_allocation_groupID_get = resource_allocation_groupID_get, - .resource_allocation_enable_set = resource_allocation_enable_set, - .resource_allocation_enable_get = resource_allocation_enable_get, -}; diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.h b/arch/powerpc/platforms/cell/spu_priv1_mmio.h deleted file mode 100644 index 04f0db339dc1..000000000000 --- a/arch/powerpc/platforms/cell/spu_priv1_mmio.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * spu hypervisor abstraction for direct hardware access. - * - * Copyright (C) 2006 Sony Computer Entertainment Inc. - * Copyright 2006 Sony Corp. - */ - -#ifndef SPU_PRIV1_MMIO_H -#define SPU_PRIV1_MMIO_H - -struct device_node *spu_devnode(struct spu *spu); - -#endif /* SPU_PRIV1_MMIO_H */ diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c index 87ad7d563cfa..000894e07b02 100644 --- a/arch/powerpc/platforms/cell/spu_syscalls.c +++ b/arch/powerpc/platforms/cell/spu_syscalls.c @@ -36,6 +36,9 @@ static inline struct spufs_calls *spufs_calls_get(void) static inline void spufs_calls_put(struct spufs_calls *calls) { + if (!calls) + return; + BUG_ON(calls != spufs_calls); /* we don't need to rcu this, as we hold a reference to the module */ @@ -53,82 +56,55 @@ static inline void spufs_calls_put(struct spufs_calls *calls) { } #endif /* CONFIG_SPU_FS_MODULE */ +DEFINE_CLASS(spufs_calls, struct spufs_calls *, spufs_calls_put(_T), spufs_calls_get(), void) + SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags, umode_t, mode, int, neighbor_fd) { - long ret; - struct spufs_calls *calls; - - calls = spufs_calls_get(); + CLASS(spufs_calls, calls)(); if (!calls) return -ENOSYS; if (flags & SPU_CREATE_AFFINITY_SPU) { - struct fd neighbor = fdget(neighbor_fd); - ret = -EBADF; - if (neighbor.file) { - ret = calls->create_thread(name, flags, mode, neighbor.file); - fdput(neighbor); - } - } else - ret = calls->create_thread(name, flags, mode, NULL); - - spufs_calls_put(calls); - return ret; + CLASS(fd, neighbor)(neighbor_fd); + if (fd_empty(neighbor)) + return -EBADF; + return calls->create_thread(name, flags, mode, fd_file(neighbor)); + } else { + return calls->create_thread(name, flags, mode, NULL); + } } SYSCALL_DEFINE3(spu_run,int, fd, __u32 __user *, unpc, __u32 __user *, ustatus) { - long ret; - struct fd arg; - struct spufs_calls *calls; - - calls = spufs_calls_get(); + CLASS(spufs_calls, calls)(); if (!calls) return -ENOSYS; - ret = -EBADF; - arg = fdget(fd); - if (arg.file) { - ret = calls->spu_run(arg.file, unpc, ustatus); - fdput(arg); - } + CLASS(fd, arg)(fd); + if (fd_empty(arg)) + return -EBADF; - spufs_calls_put(calls); - return ret; + return calls->spu_run(fd_file(arg), unpc, ustatus); } #ifdef CONFIG_COREDUMP int elf_coredump_extra_notes_size(void) { - struct spufs_calls *calls; - int ret; - - calls = spufs_calls_get(); + CLASS(spufs_calls, calls)(); if (!calls) return 0; - ret = calls->coredump_extra_notes_size(); - - spufs_calls_put(calls); - - return ret; + return calls->coredump_extra_notes_size(); } int elf_coredump_extra_notes_write(struct coredump_params *cprm) { - struct spufs_calls *calls; - int ret; - - calls = 
spufs_calls_get(); + CLASS(spufs_calls, calls)(); if (!calls) return 0; - ret = calls->coredump_extra_notes_write(cprm); - - spufs_calls_put(calls); - - return ret; + return calls->coredump_extra_notes_write(cprm); } #endif diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c index 18daafbe2e65..301ee7d8b7df 100644 --- a/arch/powerpc/platforms/cell/spufs/coredump.c +++ b/arch/powerpc/platforms/cell/spufs/coredump.c @@ -73,9 +73,7 @@ static struct spu_context *coredump_next_context(int *fd) return NULL; *fd = n - 1; - rcu_read_lock(); - file = lookup_fdget_rcu(*fd); - rcu_read_unlock(); + file = fget_raw(*fd); if (file) { ctx = SPUFS_I(file_inode(file))->i_ctx; get_spu_context(ctx); diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 02a8158c469d..d5a2c77bc908 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -453,7 +453,6 @@ static const struct file_operations spufs_cntl_fops = { .release = spufs_cntl_release, .read = simple_attr_read, .write = simple_attr_write, - .llseek = no_llseek, .mmap = spufs_cntl_mmap, }; @@ -634,7 +633,6 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf, static const struct file_operations spufs_mbox_fops = { .open = spufs_pipe_open, .read = spufs_mbox_read, - .llseek = no_llseek, }; static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, @@ -664,7 +662,6 @@ static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, static const struct file_operations spufs_mbox_stat_fops = { .open = spufs_pipe_open, .read = spufs_mbox_stat_read, - .llseek = no_llseek, }; /* low-level ibox access function */ @@ -769,7 +766,6 @@ static const struct file_operations spufs_ibox_fops = { .open = spufs_pipe_open, .read = spufs_ibox_read, .poll = spufs_ibox_poll, - .llseek = no_llseek, }; static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, @@ -797,7 +793,6 @@ static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, static const struct file_operations spufs_ibox_stat_fops = { .open = spufs_pipe_open, .read = spufs_ibox_stat_read, - .llseek = no_llseek, }; /* low-level mailbox write */ @@ -901,7 +896,6 @@ static const struct file_operations spufs_wbox_fops = { .open = spufs_pipe_open, .write = spufs_wbox_write, .poll = spufs_wbox_poll, - .llseek = no_llseek, }; static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, @@ -929,7 +923,6 @@ static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, static const struct file_operations spufs_wbox_stat_fops = { .open = spufs_pipe_open, .read = spufs_wbox_stat_read, - .llseek = no_llseek, }; static int spufs_signal1_open(struct inode *inode, struct file *file) @@ -1056,7 +1049,6 @@ static const struct file_operations spufs_signal1_fops = { .read = spufs_signal1_read, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, - .llseek = no_llseek, }; static const struct file_operations spufs_signal1_nosched_fops = { @@ -1064,7 +1056,6 @@ static const struct file_operations spufs_signal1_nosched_fops = { .release = spufs_signal1_release, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, - .llseek = no_llseek, }; static int spufs_signal2_open(struct inode *inode, struct file *file) @@ -1195,7 +1186,6 @@ static const struct file_operations spufs_signal2_fops = { .read = spufs_signal2_read, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, - .llseek = no_llseek, }; 
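
The spu_syscalls.c conversion above leans on the scope-based cleanup helpers from <linux/cleanup.h>: DEFINE_CLASS() pairs a constructor expression with a destructor, and CLASS(name, var)(...) declares a local that is released on every return path, which is why the explicit spufs_calls_put() calls disappear and why spufs_calls_put() gains a NULL check. A simplified sketch of the machinery (see the real header for the exact definitions):

/*
 * DEFINE_CLASS(name, type, exit, init, init_args...) generates a
 * typedef for "type", an inline destructor that runs "exit" on the
 * variable (spelled _T), and an inline constructor returning "init".
 * CLASS(name, var)(args) then expands to roughly:
 *
 *   class_name_t var __attribute__((cleanup(class_name_destructor)))
 *           = class_name_constructor(args);
 */
DEFINE_CLASS(spufs_calls, struct spufs_calls *,
	     spufs_calls_put(_T),	/* destructor: runs at end of scope */
	     spufs_calls_get(),		/* constructor expression */
	     void)			/* no constructor arguments */
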
static const struct file_operations spufs_signal2_nosched_fops = { @@ -1203,7 +1193,6 @@ static const struct file_operations spufs_signal2_nosched_fops = { .release = spufs_signal2_release, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, - .llseek = no_llseek, }; /* @@ -1343,7 +1332,6 @@ static const struct file_operations spufs_mss_fops = { .open = spufs_mss_open, .release = spufs_mss_release, .mmap = spufs_mss_mmap, - .llseek = no_llseek, }; static vm_fault_t @@ -1401,7 +1389,6 @@ static const struct file_operations spufs_psmap_fops = { .open = spufs_psmap_open, .release = spufs_psmap_release, .mmap = spufs_psmap_mmap, - .llseek = no_llseek, }; @@ -1704,23 +1691,11 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id) ret = spu_acquire(ctx); if (ret) - goto out; -#if 0 -/* this currently hangs */ - ret = spufs_wait(ctx->mfc_wq, - ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2)); - if (ret) - goto out; - ret = spufs_wait(ctx->mfc_wq, - ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait); - if (ret) - goto out; -#else - ret = 0; -#endif + return ret; + spu_release(ctx); -out: - return ret; + + return 0; } static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) @@ -1744,7 +1719,6 @@ static const struct file_operations spufs_mfc_fops = { .flush = spufs_mfc_flush, .fsync = spufs_mfc_fsync, .mmap = spufs_mfc_mmap, - .llseek = no_llseek, }; static int spufs_npc_set(void *data, u64 val) @@ -2114,7 +2088,6 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, static const struct file_operations spufs_dma_info_fops = { .open = spufs_info_open, .read = spufs_dma_info_read, - .llseek = no_llseek, }; static void spufs_get_proxydma_info(struct spu_context *ctx, @@ -2171,7 +2144,6 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, static const struct file_operations spufs_proxydma_info_fops = { .open = spufs_info_open, .read = spufs_proxydma_info_read, - .llseek = no_llseek, }; static int spufs_show_tid(struct seq_file *s, void *private) @@ -2454,7 +2426,6 @@ static const struct file_operations spufs_switch_log_fops = { .read = spufs_switch_log_read, .poll = spufs_switch_log_poll, .release = spufs_switch_log_release, - .llseek = no_llseek, }; /** diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c index 827d338deaf4..2c2999de6bfa 100644 --- a/arch/powerpc/platforms/cell/spufs/gang.c +++ b/arch/powerpc/platforms/cell/spufs/gang.c @@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void) mutex_init(&gang->aff_mutex); INIT_LIST_HEAD(&gang->list); INIT_LIST_HEAD(&gang->aff_list_head); + gang->alive = 1; out: return gang; diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 030de2b8c145..9f9e4b871627 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -192,13 +192,32 @@ static int spufs_fill_dir(struct dentry *dir, return -ENOMEM; ret = spufs_new_file(dir->d_sb, dentry, files->ops, files->mode & mode, files->size, ctx); - if (ret) + if (ret) { + dput(dentry); return ret; + } files++; } return 0; } +static void unuse_gang(struct dentry *dir) +{ + struct inode *inode = dir->d_inode; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; + + if (gang) { + bool dead; + + inode_lock(inode); // exclusion with spufs_create_context() + dead = !--gang->alive; + inode_unlock(inode); + + if (dead) + simple_recursive_removal(dir, NULL); + } +} + static int spufs_dir_close(struct 
inode *inode, struct file *file) { struct inode *parent; @@ -213,6 +232,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file) inode_unlock(parent); WARN_ON(ret); + unuse_gang(dir->d_parent); return dcache_dir_close(inode, file); } @@ -405,7 +425,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, { int ret; int affinity; - struct spu_gang *gang; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; struct spu_context *neighbor; struct path path = {.mnt = mnt, .dentry = dentry}; @@ -420,11 +440,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) return -ENODEV; - gang = NULL; + if (gang) { + if (!gang->alive) + return -ENOENT; + gang->alive++; + } + neighbor = NULL; affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); if (affinity) { - gang = SPUFS_I(inode)->i_gang; if (!gang) return -EINVAL; mutex_lock(&gang->aff_mutex); @@ -436,8 +460,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, } ret = spufs_mkdir(inode, dentry, flags, mode & 0777); - if (ret) + if (ret) { + if (neighbor) + put_spu_context(neighbor); goto out_aff_unlock; + } if (affinity) { spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx, @@ -453,6 +480,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, out_aff_unlock: if (affinity) mutex_unlock(&gang->aff_mutex); + if (ret && gang) + gang->alive--; // can't reach 0 return ret; } @@ -482,6 +511,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) inode->i_fop = &simple_dir_operations; d_instantiate(dentry, inode); + dget(dentry); inc_nlink(dir); inc_nlink(d_inode(dentry)); return ret; @@ -492,6 +522,21 @@ out: return ret; } +static int spufs_gang_close(struct inode *inode, struct file *file) +{ + unuse_gang(file->f_path.dentry); + return dcache_dir_close(inode, file); +} + +static const struct file_operations spufs_gang_fops = { + .open = dcache_dir_open, + .release = spufs_gang_close, + .llseek = dcache_dir_lseek, + .read = generic_read_dir, + .iterate_shared = dcache_readdir, + .fsync = noop_fsync, +}; + static int spufs_gang_open(const struct path *path) { int ret; @@ -511,7 +556,7 @@ static int spufs_gang_open(const struct path *path) return PTR_ERR(filp); } - filp->f_op = &simple_dir_operations; + filp->f_op = &spufs_gang_fops; fd_install(ret, filp); return ret; } @@ -526,10 +571,8 @@ static int spufs_create_gang(struct inode *inode, ret = spufs_mkgang(inode, dentry, mode & 0777); if (!ret) { ret = spufs_gang_open(&path); - if (ret < 0) { - int err = simple_rmdir(inode, dentry); - WARN_ON(err); - } + if (ret < 0) + unuse_gang(dentry); } return ret; } @@ -822,6 +865,7 @@ static void __exit spufs_exit(void) } module_exit(spufs_exit); +MODULE_DESCRIPTION("SPU file system"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 99bd027a7f7c..8e7ed010bfde 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -508,7 +508,7 @@ static void __spu_del_from_rq(struct spu_context *ctx) if (!list_empty(&ctx->rq)) { if (!--spu_prio->nr_waiting) - del_timer(&spusched_timer); + timer_delete(&spusched_timer); list_del_init(&ctx->rq); if (list_empty(&spu_prio->runq[prio])) @@ -868,7 +868,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio) } /** - * spu_deactivate - unbind a context from it's physical 
spu + * spu_deactivate - unbind a context from its physical spu * @ctx: spu context to unbind * * Unbind @ctx from the physical spu it is running on and schedule @@ -1126,8 +1126,8 @@ void spu_sched_exit(void) remove_proc_entry("spu_loadavg", NULL); - del_timer_sync(&spusched_timer); - del_timer_sync(&spuloadavg_timer); + timer_delete_sync(&spusched_timer); + timer_delete_sync(&spuloadavg_timer); kthread_stop(spusched_task); for (node = 0; node < MAX_NUMNODES; node++) { diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 84958487f696..d33787c57c39 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -151,6 +151,8 @@ struct spu_gang { int aff_flags; struct spu *aff_ref_spu; atomic_t aff_sched_count; + + int alive; }; /* Flag bits for spu_gang aff_flags */ diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c index 0eedae96498c..d3bf56a46656 100644 --- a/arch/powerpc/platforms/chrp/nvram.c +++ b/arch/powerpc/platforms/chrp/nvram.c @@ -92,4 +92,5 @@ void __init chrp_nvram_init(void) return; } +MODULE_DESCRIPTION("PPC NVRAM device driver"); MODULE_LICENSE("GPL v2"); diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c index 5c4f1a9ca154..6f4a41a9352a 100644 --- a/arch/powerpc/platforms/chrp/pegasos_eth.c +++ b/arch/powerpc/platforms/chrp/pegasos_eth.c @@ -14,7 +14,7 @@ #include <linux/ioport.h> #include <linux/device.h> #include <linux/platform_device.h> -#include <linux/mv643xx.h> +#include <linux/mv643xx_eth.h> #include <linux/pci.h> #define PEGASOS2_MARVELL_REGBASE (0xf1000000) @@ -25,12 +25,15 @@ #define PEGASOS2_SRAM_BASE_ETH_PORT0 (PEGASOS2_SRAM_BASE) #define PEGASOS2_SRAM_BASE_ETH_PORT1 (PEGASOS2_SRAM_BASE_ETH_PORT0 + (PEGASOS2_SRAM_SIZE / 2) ) - #define PEGASOS2_SRAM_RXRING_SIZE (PEGASOS2_SRAM_SIZE/4) #define PEGASOS2_SRAM_TXRING_SIZE (PEGASOS2_SRAM_SIZE/4) #undef BE_VERBOSE +#define MV64340_BASE_ADDR_ENABLE 0x278 +#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268 +#define MV64340_SRAM_CONFIG 0x380 + static struct resource mv643xx_eth_shared_resources[] = { [0] = { .name = "ethernet shared base", diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 36ee3a5056a1..c1bfa4c3444c 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -486,7 +486,7 @@ static void __init chrp_find_8259(void) i8259_init(pic, chrp_int_ack); if (ppc_md.get_irq == NULL) { ppc_md.get_irq = i8259_irq; - irq_set_default_host(i8259_get_host()); + irq_set_default_domain(i8259_get_host()); } if (chrp_mpic != NULL) { cascade_irq = irq_of_parse_and_map(pic, 0); diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index 4d9200bdba78..91a8f0a7086e 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c @@ -149,8 +149,9 @@ static struct irq_domain * __init flipper_pic_init(struct device_node *np) __flipper_quiesce(io_base); - irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS, - &flipper_irq_domain_ops, io_base); + irq_domain = irq_domain_create_linear(of_fwnode_handle(np), + FLIPPER_NR_IRQS, + &flipper_irq_domain_ops, io_base); if (!irq_domain) { pr_err("failed to allocate irq_domain\n"); return NULL; @@ -172,7 +173,7 @@ unsigned int flipper_pic_get_irq(void) return 0; /* no more IRQs pending */ irq = __ffs(irq_status); - return 
irq_linear_revmap(flipper_irq_host, irq); + return irq_find_mapping(flipper_irq_host, irq); } /* @@ -190,7 +191,7 @@ void __init flipper_pic_probe(void) flipper_irq_host = flipper_pic_init(np); BUG_ON(!flipper_irq_host); - irq_set_default_host(flipper_irq_host); + irq_set_default_domain(flipper_irq_host); of_node_put(np); } diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 4d2d92de30af..b57e87b0b3ce 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -175,8 +175,9 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np) __hlwd_quiesce(io_base); - irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS, - &hlwd_irq_domain_ops, io_base); + irq_domain = irq_domain_create_linear(of_fwnode_handle(np), + HLWD_NR_IRQS, + &hlwd_irq_domain_ops, io_base); if (!irq_domain) { pr_err("failed to allocate irq_domain\n"); iounmap(io_base); @@ -189,7 +190,7 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np) unsigned int hlwd_pic_get_irq(void) { unsigned int hwirq = __hlwd_pic_get_irq(hlwd_irq_host); - return hwirq ? irq_linear_revmap(hlwd_irq_host, hwirq) : 0; + return hwirq ? irq_find_mapping(hlwd_irq_host, hwirq) : 0; } /* diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c index e265f026eee2..4012f206ec63 100644 --- a/arch/powerpc/platforms/embedded6xx/linkstation.c +++ b/arch/powerpc/platforms/embedded6xx/linkstation.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/initrd.h> #include <linux/of_platform.h> +#include <linux/seq_file.h> #include <asm/time.h> #include <asm/mpic.h> diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c index 00bec0f051be..5ca41972ef22 100644 --- a/arch/powerpc/platforms/embedded6xx/mvme5100.c +++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c @@ -14,6 +14,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/seq_file.h> #include <asm/i8259.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/maple/Kconfig b/arch/powerpc/platforms/maple/Kconfig deleted file mode 100644 index 4c058cc57c90..000000000000 --- a/arch/powerpc/platforms/maple/Kconfig +++ /dev/null @@ -1,19 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -config PPC_MAPLE - depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN - bool "Maple 970FX Evaluation Board" - select FORCE_PCI - select MPIC - select U3_DART - select MPIC_U3_HT_IRQS - select GENERIC_TBSYNC - select PPC_UDBG_16550 - select PPC_970_NAP - select PPC_64S_HASH_MMU - select PPC_HASH_MMU_NATIVE - select PPC_RTAS - select MMIO_NVRAM - select ATA_NONSTANDARD if ATA - help - This option enables support for the Maple 970FX Evaluation Board. - For more information, refer to <http://www.970eval.com> diff --git a/arch/powerpc/platforms/maple/Makefile b/arch/powerpc/platforms/maple/Makefile deleted file mode 100644 index 19f35ab828a7..000000000000 --- a/arch/powerpc/platforms/maple/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-y += setup.o pci.o time.o diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h deleted file mode 100644 index 4f358b55c341..000000000000 --- a/arch/powerpc/platforms/maple/maple.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Declarations for maple-specific code. - * - * Maple is the name of a PPC970 evaluation board. 
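
The flipper-pic and hlwd-pic hunks above (like the chrp irq_set_default_host to irq_set_default_domain rename earlier) are mechanical API migrations with no behavioural change intended: the of-node based domain constructor moves to the fwnode based one, and the linear-revmap fast path becomes the generic lookup. Side by side, as a sketch:

/* before */
host = irq_domain_add_linear(np, FLIPPER_NR_IRQS,
			     &flipper_irq_domain_ops, io_base);
virq = irq_linear_revmap(host, hwirq);

/* after: same semantics */
host = irq_domain_create_linear(of_fwnode_handle(np), FLIPPER_NR_IRQS,
				&flipper_irq_domain_ops, io_base);
virq = irq_find_mapping(host, hwirq);
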
- */ -extern int maple_set_rtc_time(struct rtc_time *tm); -extern void maple_get_rtc_time(struct rtc_time *tm); -extern time64_t maple_get_boot_time(void); -extern void maple_calibrate_decr(void); -extern void maple_pci_init(void); -extern void maple_pci_irq_fixup(struct pci_dev *dev); -extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel); - -extern struct pci_controller_ops maple_pci_controller_ops; diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c deleted file mode 100644 index b911b31717cc..000000000000 --- a/arch/powerpc/platforms/maple/pci.c +++ /dev/null @@ -1,672 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), - * IBM Corp. - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/irq.h> -#include <linux/of_irq.h> - -#include <asm/sections.h> -#include <asm/io.h> -#include <asm/pci-bridge.h> -#include <asm/machdep.h> -#include <asm/iommu.h> -#include <asm/ppc-pci.h> -#include <asm/isa-bridge.h> - -#include "maple.h" - -#ifdef DEBUG -#define DBG(x...) printk(x) -#else -#define DBG(x...) -#endif - -static struct pci_controller *u3_agp, *u3_ht, *u4_pcie; - -static int __init fixup_one_level_bus_range(struct device_node *node, int higher) -{ - for (; node; node = node->sibling) { - const int *bus_range; - const unsigned int *class_code; - int len; - - /* For PCI<->PCI bridges or CardBus bridges, we go down */ - class_code = of_get_property(node, "class-code", NULL); - if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && - (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) - continue; - bus_range = of_get_property(node, "bus-range", &len); - if (bus_range != NULL && len > 2 * sizeof(int)) { - if (bus_range[1] > higher) - higher = bus_range[1]; - } - higher = fixup_one_level_bus_range(node->child, higher); - } - return higher; -} - -/* This routine fixes the "bus-range" property of all bridges in the - * system since they tend to have their "last" member wrong on macs - * - * Note that the bus numbers manipulated here are OF bus numbers, they - * are not Linux bus numbers. - */ -static void __init fixup_bus_range(struct device_node *bridge) -{ - int *bus_range; - struct property *prop; - int len; - - /* Lookup the "bus-range" property for the hose */ - prop = of_find_property(bridge, "bus-range", &len); - if (prop == NULL || prop->value == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %pOF\n", - bridge); - return; - } - bus_range = prop->value; - bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]); -} - - -static unsigned long u3_agp_cfa0(u8 devfn, u8 off) -{ - return (1 << (unsigned long)PCI_SLOT(devfn)) | - ((unsigned long)PCI_FUNC(devfn) << 8) | - ((unsigned long)off & 0xFCUL); -} - -static unsigned long u3_agp_cfa1(u8 bus, u8 devfn, u8 off) -{ - return ((unsigned long)bus << 16) | - ((unsigned long)devfn << 8) | - ((unsigned long)off & 0xFCUL) | - 1UL; -} - -static volatile void __iomem *u3_agp_cfg_access(struct pci_controller* hose, - u8 bus, u8 dev_fn, u8 offset) -{ - unsigned int caddr; - - if (bus == hose->first_busno) { - if (dev_fn < (11 << 3)) - return NULL; - caddr = u3_agp_cfa0(dev_fn, offset); - } else - caddr = u3_agp_cfa1(bus, dev_fn, offset); - - /* Uninorth will return garbage if we don't read back the value ! 
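
For reference, the two config-address formats built by u3_agp_cfa0/cfa1 above, worked through by hand with illustrative values:

/*
 * Type 0, root bus: devfn = PCI_DEVFN(11, 0), offset 0x10:
 *   (1 << 11) | (0 << 8) | (0x10 & 0xFC) = 0x00000810
 * (a one-hot slot-select bit rather than an encoded device number).
 *
 * Type 1, behind a bridge: bus 0xf1, devfn 0x08, offset 0x10:
 *   (0xf1 << 16) | (0x08 << 8) | 0x10 | 1 = 0x00f10811
 * (the low bit marks the cycle as type 1).
 */
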
*/ - do { - out_le32(hose->cfg_addr, caddr); - } while (in_le32(hose->cfg_addr) != caddr); - - offset &= 0x07; - return hose->cfg_data + offset; -} - -static int u3_agp_read_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 *val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - - addr = u3_agp_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_le16(addr); - break; - default: - *val = in_le32(addr); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static int u3_agp_write_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - - addr = u3_agp_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_le16(addr, val); - break; - default: - out_le32(addr, val); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static struct pci_ops u3_agp_pci_ops = -{ - .read = u3_agp_read_config, - .write = u3_agp_write_config, -}; - -static unsigned long u3_ht_cfa0(u8 devfn, u8 off) -{ - return (devfn << 8) | off; -} - -static unsigned long u3_ht_cfa1(u8 bus, u8 devfn, u8 off) -{ - return u3_ht_cfa0(devfn, off) + (bus << 16) + 0x01000000UL; -} - -static volatile void __iomem *u3_ht_cfg_access(struct pci_controller* hose, - u8 bus, u8 devfn, u8 offset) -{ - if (bus == hose->first_busno) { - if (PCI_SLOT(devfn) == 0) - return NULL; - return hose->cfg_data + u3_ht_cfa0(devfn, offset); - } else - return hose->cfg_data + u3_ht_cfa1(bus, devfn, offset); -} - -static int u3_ht_root_read_config(struct pci_controller *hose, u8 offset, - int len, u32 *val) -{ - volatile void __iomem *addr; - - addr = hose->cfg_addr; - addr += ((offset & ~3) << 2) + (4 - len - (offset & 3)); - - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_be16(addr); - break; - default: - *val = in_be32(addr); - break; - } - - return PCIBIOS_SUCCESSFUL; -} - -static int u3_ht_root_write_config(struct pci_controller *hose, u8 offset, - int len, u32 val) -{ - volatile void __iomem *addr; - - addr = hose->cfg_addr + ((offset & ~3) << 2) + (4 - len - (offset & 3)); - - if (offset >= PCI_BASE_ADDRESS_0 && offset < PCI_CAPABILITY_LIST) - return PCIBIOS_SUCCESSFUL; - - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_be16(addr, val); - break; - default: - out_be32(addr, val); - break; - } - - return PCIBIOS_SUCCESSFUL; -} - -static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 *val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0)) - return u3_ht_root_read_config(hose, offset, len, val); - - if (offset > 0xff) - return PCIBIOS_BAD_REGISTER_NUMBER; - - addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return 
PCIBIOS_DEVICE_NOT_FOUND; - - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_le16(addr); - break; - default: - *val = in_le32(addr); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0)) - return u3_ht_root_write_config(hose, offset, len, val); - - if (offset > 0xff) - return PCIBIOS_BAD_REGISTER_NUMBER; - - addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_le16(addr, val); - break; - default: - out_le32(addr, val); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static struct pci_ops u3_ht_pci_ops = -{ - .read = u3_ht_read_config, - .write = u3_ht_write_config, -}; - -static unsigned int u4_pcie_cfa0(unsigned int devfn, unsigned int off) -{ - return (1 << PCI_SLOT(devfn)) | - (PCI_FUNC(devfn) << 8) | - ((off >> 8) << 28) | - (off & 0xfcu); -} - -static unsigned int u4_pcie_cfa1(unsigned int bus, unsigned int devfn, - unsigned int off) -{ - return (bus << 16) | - (devfn << 8) | - ((off >> 8) << 28) | - (off & 0xfcu) | 1u; -} - -static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose, - u8 bus, u8 dev_fn, int offset) -{ - unsigned int caddr; - - if (bus == hose->first_busno) - caddr = u4_pcie_cfa0(dev_fn, offset); - else - caddr = u4_pcie_cfa1(bus, dev_fn, offset); - - /* Uninorth will return garbage if we don't read back the value ! */ - do { - out_le32(hose->cfg_addr, caddr); - } while (in_le32(hose->cfg_addr) != caddr); - - offset &= 0x03; - return hose->cfg_data + offset; -} - -static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 *val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - if (offset >= 0x1000) - return PCIBIOS_BAD_REGISTER_NUMBER; - addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_le16(addr); - break; - default: - *val = in_le32(addr); - break; - } - return PCIBIOS_SUCCESSFUL; -} -static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - if (offset >= 0x1000) - return PCIBIOS_BAD_REGISTER_NUMBER; - addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. 
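
The U4 encodings above extend the same scheme to PCIe's 4KB config space by folding the offset's high bits into the top nibble of the address word; a worked example (hand-computed, illustrative):

/* e.g. u4_pcie_cfa1(bus 1, devfn 0, offset 0x100):
 *   (1 << 16) | (0 << 8) | ((0x100 >> 8) << 28) | (0x100 & 0xfc) | 1
 * = 0x00010000 | 0x10000000 | 0x0 | 1
 * = 0x10010001
 * so offsets up to 0xfff fit, with bits 8-11 of the offset in the top
 * nibble and "offset &= 0x03" selecting the byte lane in the data
 * window.
 */
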
- */ - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_le16(addr, val); - break; - default: - out_le32(addr, val); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static struct pci_ops u4_pcie_pci_ops = -{ - .read = u4_pcie_read_config, - .write = u4_pcie_write_config, -}; - -static void __init setup_u3_agp(struct pci_controller* hose) -{ - /* On G5, we move AGP up to high bus number so we don't need - * to reassign bus numbers for HT. If we ever have P2P bridges - * on AGP, we'll have to move pci_assign_all_buses to the - * pci_controller structure so we enable it for AGP and not for - * HT childs. - * We hard code the address because of the different size of - * the reg address cell, we shall fix that by killing struct - * reg_property and using some accessor functions instead - */ - hose->first_busno = 0xf0; - hose->last_busno = 0xff; - hose->ops = &u3_agp_pci_ops; - hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); - hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); - - u3_agp = hose; -} - -static void __init setup_u4_pcie(struct pci_controller* hose) -{ - /* We currently only implement the "non-atomic" config space, to - * be optimised later. - */ - hose->ops = &u4_pcie_pci_ops; - hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); - hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); - - u4_pcie = hose; -} - -static void __init setup_u3_ht(struct pci_controller* hose) -{ - hose->ops = &u3_ht_pci_ops; - - /* We hard code the address because of the different size of - * the reg address cell, we shall fix that by killing struct - * reg_property and using some accessor functions instead - */ - hose->cfg_data = ioremap(0xf2000000, 0x02000000); - hose->cfg_addr = ioremap(0xf8070000, 0x1000); - - hose->first_busno = 0; - hose->last_busno = 0xef; - - u3_ht = hose; -} - -static int __init maple_add_bridge(struct device_node *dev) -{ - int len; - struct pci_controller *hose; - char* disp_name; - const int *bus_range; - int primary = 1; - - DBG("Adding PCI host bridge %pOF\n", dev); - - bus_range = of_get_property(dev, "bus-range", &len); - if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %pOF, assume bus 0\n", - dev); - } - - hose = pcibios_alloc_controller(dev); - if (hose == NULL) - return -ENOMEM; - hose->first_busno = bus_range ? bus_range[0] : 0; - hose->last_busno = bus_range ? bus_range[1] : 0xff; - hose->controller_ops = maple_pci_controller_ops; - - disp_name = NULL; - if (of_device_is_compatible(dev, "u3-agp")) { - setup_u3_agp(hose); - disp_name = "U3-AGP"; - primary = 0; - } else if (of_device_is_compatible(dev, "u3-ht")) { - setup_u3_ht(hose); - disp_name = "U3-HT"; - primary = 1; - } else if (of_device_is_compatible(dev, "u4-pcie")) { - setup_u4_pcie(hose); - disp_name = "U4-PCIE"; - primary = 0; - } - printk(KERN_INFO "Found %s PCI host bridge. 
Firmware bus number: %d->%d\n", - disp_name, hose->first_busno, hose->last_busno); - - /* Interpret the "ranges" property */ - /* This also maps the I/O region and sets isa_io/mem_base */ - pci_process_bridge_OF_ranges(hose, dev, primary); - - /* Fixup "bus-range" OF property */ - fixup_bus_range(dev); - - /* Check for legacy IOs */ - isa_bridge_find_early(hose); - - /* create pci_dn's for DT nodes under this PHB */ - pci_devs_phb_init_dynamic(hose); - - return 0; -} - - -void maple_pci_irq_fixup(struct pci_dev *dev) -{ - DBG(" -> maple_pci_irq_fixup\n"); - - /* Fixup IRQ for PCIe host */ - if (u4_pcie != NULL && dev->bus->number == 0 && - pci_bus_to_host(dev->bus) == u4_pcie) { - printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n"); - dev->irq = irq_create_mapping(NULL, 1); - if (dev->irq) - irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); - } - - /* Hide AMD8111 IDE interrupt when in legacy mode so - * the driver calls pci_get_legacy_ide_irq() - */ - if (dev->vendor == PCI_VENDOR_ID_AMD && - dev->device == PCI_DEVICE_ID_AMD_8111_IDE && - (dev->class & 5) != 5) { - dev->irq = 0; - } - - DBG(" <- maple_pci_irq_fixup\n"); -} - -static int maple_pci_root_bridge_prepare(struct pci_host_bridge *bridge) -{ - struct pci_controller *hose = pci_bus_to_host(bridge->bus); - struct device_node *np, *child; - - if (hose != u3_agp) - return 0; - - /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We - * assume there is no P2P bridge on the AGP bus, which should be a - * safe assumptions hopefully. - */ - np = hose->dn; - PCI_DN(np)->busno = 0xf0; - for_each_child_of_node(np, child) - PCI_DN(child)->busno = 0xf0; - - return 0; -} - -void __init maple_pci_init(void) -{ - struct device_node *np, *root; - struct device_node *ht = NULL; - - /* Probe root PCI hosts, that is on U3 the AGP host and the - * HyperTransport host. That one is actually "kept" around - * and actually added last as it's resource management relies - * on the AGP resources to have been setup first - */ - root = of_find_node_by_path("/"); - if (root == NULL) { - printk(KERN_CRIT "maple_find_bridges: can't find root of device tree\n"); - return; - } - for_each_child_of_node(root, np) { - if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "ht")) - continue; - if ((of_device_is_compatible(np, "u4-pcie") || - of_device_is_compatible(np, "u3-agp")) && - maple_add_bridge(np) == 0) - of_node_get(np); - - if (of_device_is_compatible(np, "u3-ht")) { - of_node_get(np); - ht = np; - } - } - of_node_put(root); - - /* Now setup the HyperTransport host if we found any - */ - if (ht && maple_add_bridge(ht) != 0) - of_node_put(ht); - - ppc_md.pcibios_root_bridge_prepare = maple_pci_root_bridge_prepare; - - /* Tell pci.c to not change any resource allocations. */ - pci_add_flags(PCI_PROBE_ONLY); -} - -int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) -{ - struct device_node *np; - unsigned int defirq = channel ? 15 : 14; - unsigned int irq; - - if (pdev->vendor != PCI_VENDOR_ID_AMD || - pdev->device != PCI_DEVICE_ID_AMD_8111_IDE) - return defirq; - - np = pci_device_to_OF_node(pdev); - if (np == NULL) { - printk("Failed to locate OF node for IDE %s\n", - pci_name(pdev)); - return defirq; - } - irq = irq_of_parse_and_map(np, channel & 0x1); - if (!irq) { - printk("Failed to map onboard IDE interrupt for channel %d\n", - channel); - return defirq; - } - return irq; -} - -static void quirk_ipr_msi(struct pci_dev *dev) -{ - /* Something prevents MSIs from the IPR from working on Bimini, - * and the driver has no smarts to recover. 
So disable MSI - * on it for now. */ - - if (machine_is(maple)) { - dev->no_msi = 1; - dev_info(&dev->dev, "Quirk disabled MSI\n"); - } -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, - quirk_ipr_msi); - -struct pci_controller_ops maple_pci_controller_ops = { -}; diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c deleted file mode 100644 index f329a03edf4a..000000000000 --- a/arch/powerpc/platforms/maple/setup.c +++ /dev/null @@ -1,363 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Maple (970 eval board) setup code - * - * (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org), - * IBM Corp. - */ - -#undef DEBUG - -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include <linux/unistd.h> -#include <linux/ptrace.h> -#include <linux/user.h> -#include <linux/tty.h> -#include <linux/string.h> -#include <linux/delay.h> -#include <linux/ioport.h> -#include <linux/major.h> -#include <linux/initrd.h> -#include <linux/vt_kern.h> -#include <linux/console.h> -#include <linux/pci.h> -#include <linux/adb.h> -#include <linux/cuda.h> -#include <linux/pmu.h> -#include <linux/irq.h> -#include <linux/seq_file.h> -#include <linux/root_dev.h> -#include <linux/serial.h> -#include <linux/smp.h> -#include <linux/bitops.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/platform_device.h> -#include <linux/memblock.h> - -#include <asm/processor.h> -#include <asm/sections.h> -#include <asm/io.h> -#include <asm/pci-bridge.h> -#include <asm/iommu.h> -#include <asm/machdep.h> -#include <asm/dma.h> -#include <asm/cputable.h> -#include <asm/time.h> -#include <asm/mpic.h> -#include <asm/rtas.h> -#include <asm/udbg.h> -#include <asm/nvram.h> - -#include "maple.h" - -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) 
-#endif - -static unsigned long maple_find_nvram_base(void) -{ - struct device_node *rtcs; - unsigned long result = 0; - - /* find NVRAM device */ - rtcs = of_find_compatible_node(NULL, "nvram", "AMD8111"); - if (rtcs) { - struct resource r; - if (of_address_to_resource(rtcs, 0, &r)) { - printk(KERN_EMERG "Maple: Unable to translate NVRAM" - " address\n"); - goto bail; - } - if (!(r.flags & IORESOURCE_IO)) { - printk(KERN_EMERG "Maple: NVRAM address isn't PIO!\n"); - goto bail; - } - result = r.start; - } else - printk(KERN_EMERG "Maple: Unable to find NVRAM\n"); - bail: - of_node_put(rtcs); - return result; -} - -static void __noreturn maple_restart(char *cmd) -{ - unsigned int maple_nvram_base; - const unsigned int *maple_nvram_offset, *maple_nvram_command; - struct device_node *sp; - - maple_nvram_base = maple_find_nvram_base(); - if (maple_nvram_base == 0) - goto fail; - - /* find service processor device */ - sp = of_find_node_by_name(NULL, "service-processor"); - if (!sp) { - printk(KERN_EMERG "Maple: Unable to find Service Processor\n"); - goto fail; - } - maple_nvram_offset = of_get_property(sp, "restart-addr", NULL); - maple_nvram_command = of_get_property(sp, "restart-value", NULL); - of_node_put(sp); - - /* send command */ - outb_p(*maple_nvram_command, maple_nvram_base + *maple_nvram_offset); - for (;;) ; - fail: - printk(KERN_EMERG "Maple: Manual Restart Required\n"); - for (;;) ; -} - -static void __noreturn maple_power_off(void) -{ - unsigned int maple_nvram_base; - const unsigned int *maple_nvram_offset, *maple_nvram_command; - struct device_node *sp; - - maple_nvram_base = maple_find_nvram_base(); - if (maple_nvram_base == 0) - goto fail; - - /* find service processor device */ - sp = of_find_node_by_name(NULL, "service-processor"); - if (!sp) { - printk(KERN_EMERG "Maple: Unable to find Service Processor\n"); - goto fail; - } - maple_nvram_offset = of_get_property(sp, "power-off-addr", NULL); - maple_nvram_command = of_get_property(sp, "power-off-value", NULL); - of_node_put(sp); - - /* send command */ - outb_p(*maple_nvram_command, maple_nvram_base + *maple_nvram_offset); - for (;;) ; - fail: - printk(KERN_EMERG "Maple: Manual Power-Down Required\n"); - for (;;) ; -} - -static void __noreturn maple_halt(void) -{ - maple_power_off(); -} - -#ifdef CONFIG_SMP -static struct smp_ops_t maple_smp_ops = { - .probe = smp_mpic_probe, - .message_pass = smp_mpic_message_pass, - .kick_cpu = smp_generic_kick_cpu, - .setup_cpu = smp_mpic_setup_cpu, - .give_timebase = smp_generic_give_timebase, - .take_timebase = smp_generic_take_timebase, -}; -#endif /* CONFIG_SMP */ - -static void __init maple_use_rtas_reboot_and_halt_if_present(void) -{ - if (rtas_function_implemented(RTAS_FN_SYSTEM_REBOOT) && - rtas_function_implemented(RTAS_FN_POWER_OFF)) { - ppc_md.restart = rtas_restart; - pm_power_off = rtas_power_off; - ppc_md.halt = rtas_halt; - } -} - -static void __init maple_setup_arch(void) -{ - /* init to some ~sane value until calibrate_delay() runs */ - loops_per_jiffy = 50000000; - - /* Setup SMP callback */ -#ifdef CONFIG_SMP - smp_ops = &maple_smp_ops; -#endif - maple_use_rtas_reboot_and_halt_if_present(); - - printk(KERN_DEBUG "Using native/NAP idle loop\n"); - - mmio_nvram_init(); -} - -/* - * This is almost identical to pSeries and CHRP. 
We need to make that - * code generic at one point, with appropriate bits in the device-tree to - * identify the presence of an HT APIC - */ -static void __init maple_init_IRQ(void) -{ - struct device_node *root, *np, *mpic_node = NULL; - const unsigned int *opprop; - unsigned long openpic_addr = 0; - int naddr, n, i, opplen, has_isus = 0; - struct mpic *mpic; - unsigned int flags = 0; - - /* Locate MPIC in the device-tree. Note that there is a bug - * in Maple device-tree where the type of the controller is - * open-pic and not interrupt-controller - */ - - for_each_node_by_type(np, "interrupt-controller") - if (of_device_is_compatible(np, "open-pic")) { - mpic_node = np; - break; - } - if (mpic_node == NULL) - for_each_node_by_type(np, "open-pic") { - mpic_node = np; - break; - } - if (mpic_node == NULL) { - printk(KERN_ERR - "Failed to locate the MPIC interrupt controller\n"); - return; - } - - /* Find address list in /platform-open-pic */ - root = of_find_node_by_path("/"); - naddr = of_n_addr_cells(root); - opprop = of_get_property(root, "platform-open-pic", &opplen); - if (opprop) { - openpic_addr = of_read_number(opprop, naddr); - has_isus = (opplen > naddr); - printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n", - openpic_addr, has_isus); - } - - BUG_ON(openpic_addr == 0); - - /* Check for a big endian MPIC */ - if (of_property_read_bool(np, "big-endian")) - flags |= MPIC_BIG_ENDIAN; - - /* XXX Maple specific bits */ - flags |= MPIC_U3_HT_IRQS; - /* All U3/U4 are big-endian, older SLOF firmware doesn't encode this */ - flags |= MPIC_BIG_ENDIAN; - - /* Setup the openpic driver. More device-tree junks, we hard code no - * ISUs for now. I'll have to revisit some stuffs with the folks doing - * the firmware for those - */ - mpic = mpic_alloc(mpic_node, openpic_addr, flags, - /*has_isus ? 16 :*/ 0, 0, " MPIC "); - BUG_ON(mpic == NULL); - - /* Add ISUs */ - opplen /= sizeof(u32); - for (n = 0, i = naddr; i < opplen; i += naddr, n++) { - unsigned long isuaddr = of_read_number(opprop + i, naddr); - mpic_assign_isu(mpic, n, isuaddr); - } - - /* All ISUs are setup, complete initialization */ - mpic_init(mpic); - ppc_md.get_irq = mpic_get_irq; - of_node_put(mpic_node); - of_node_put(root); -} - -static void __init maple_progress(char *s, unsigned short hex) -{ - printk("*** %04x : %s\n", hex, s ? s : ""); -} - - -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init maple_probe(void) -{ - if (!of_machine_is_compatible("Momentum,Maple") && - !of_machine_is_compatible("Momentum,Apache")) - return 0; - - pm_power_off = maple_power_off; - - iommu_init_early_dart(&maple_pci_controller_ops); - - return 1; -} - -#ifdef CONFIG_EDAC -/* - * Register a platform device for CPC925 memory controller on - * all boards with U3H (CPC925) bridge. 
- */ -static int __init maple_cpc925_edac_setup(void) -{ - struct platform_device *pdev; - struct device_node *np = NULL; - struct resource r; - int ret; - volatile void __iomem *mem; - u32 rev; - - np = of_find_node_by_type(NULL, "memory-controller"); - if (!np) { - printk(KERN_ERR "%s: Unable to find memory-controller node\n", - __func__); - return -ENODEV; - } - - ret = of_address_to_resource(np, 0, &r); - of_node_put(np); - - if (ret < 0) { - printk(KERN_ERR "%s: Unable to get memory-controller reg\n", - __func__); - return -ENODEV; - } - - mem = ioremap(r.start, resource_size(&r)); - if (!mem) { - printk(KERN_ERR "%s: Unable to map memory-controller memory\n", - __func__); - return -ENOMEM; - } - - rev = __raw_readl(mem); - iounmap(mem); - - if (rev < 0x34 || rev > 0x3f) { /* U3H */ - printk(KERN_ERR "%s: Non-CPC925(U3H) bridge revision: %02x\n", - __func__, rev); - return 0; - } - - pdev = platform_device_register_simple("cpc925_edac", 0, &r, 1); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - - printk(KERN_INFO "%s: CPC925 platform device created\n", __func__); - - return 0; -} -machine_device_initcall(maple, maple_cpc925_edac_setup); -#endif - -define_machine(maple) { - .name = "Maple", - .probe = maple_probe, - .setup_arch = maple_setup_arch, - .discover_phbs = maple_pci_init, - .init_IRQ = maple_init_IRQ, - .pci_irq_fixup = maple_pci_irq_fixup, - .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, - .restart = maple_restart, - .halt = maple_halt, - .get_boot_time = maple_get_boot_time, - .set_rtc_time = maple_set_rtc_time, - .get_rtc_time = maple_get_rtc_time, - .progress = maple_progress, - .power_save = power4_idle, -}; diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c deleted file mode 100644 index 91606411d2e0..000000000000 --- a/arch/powerpc/platforms/maple/time.c +++ /dev/null @@ -1,170 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org), - * IBM Corp. - */ - -#undef DEBUG - -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/init.h> -#include <linux/time.h> -#include <linux/adb.h> -#include <linux/pmu.h> -#include <linux/interrupt.h> -#include <linux/mc146818rtc.h> -#include <linux/bcd.h> -#include <linux/of_address.h> - -#include <asm/sections.h> -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/time.h> - -#include "maple.h" - -#ifdef DEBUG -#define DBG(x...) printk(x) -#else -#define DBG(x...) 
-#endif - -static int maple_rtc_addr; - -static int maple_clock_read(int addr) -{ - outb_p(addr, maple_rtc_addr); - return inb_p(maple_rtc_addr+1); -} - -static void maple_clock_write(unsigned long val, int addr) -{ - outb_p(addr, maple_rtc_addr); - outb_p(val, maple_rtc_addr+1); -} - -void maple_get_rtc_time(struct rtc_time *tm) -{ - do { - tm->tm_sec = maple_clock_read(RTC_SECONDS); - tm->tm_min = maple_clock_read(RTC_MINUTES); - tm->tm_hour = maple_clock_read(RTC_HOURS); - tm->tm_mday = maple_clock_read(RTC_DAY_OF_MONTH); - tm->tm_mon = maple_clock_read(RTC_MONTH); - tm->tm_year = maple_clock_read(RTC_YEAR); - } while (tm->tm_sec != maple_clock_read(RTC_SECONDS)); - - if (!(maple_clock_read(RTC_CONTROL) & RTC_DM_BINARY) - || RTC_ALWAYS_BCD) { - tm->tm_sec = bcd2bin(tm->tm_sec); - tm->tm_min = bcd2bin(tm->tm_min); - tm->tm_hour = bcd2bin(tm->tm_hour); - tm->tm_mday = bcd2bin(tm->tm_mday); - tm->tm_mon = bcd2bin(tm->tm_mon); - tm->tm_year = bcd2bin(tm->tm_year); - } - if ((tm->tm_year + 1900) < 1970) - tm->tm_year += 100; - - tm->tm_wday = -1; -} - -int maple_set_rtc_time(struct rtc_time *tm) -{ - unsigned char save_control, save_freq_select; - int sec, min, hour, mon, mday, year; - - spin_lock(&rtc_lock); - - save_control = maple_clock_read(RTC_CONTROL); /* tell the clock it's being set */ - - maple_clock_write((save_control|RTC_SET), RTC_CONTROL); - - save_freq_select = maple_clock_read(RTC_FREQ_SELECT); /* stop and reset prescaler */ - - maple_clock_write((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - - sec = tm->tm_sec; - min = tm->tm_min; - hour = tm->tm_hour; - mon = tm->tm_mon; - mday = tm->tm_mday; - year = tm->tm_year; - - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - sec = bin2bcd(sec); - min = bin2bcd(min); - hour = bin2bcd(hour); - mon = bin2bcd(mon); - mday = bin2bcd(mday); - year = bin2bcd(year); - } - maple_clock_write(sec, RTC_SECONDS); - maple_clock_write(min, RTC_MINUTES); - maple_clock_write(hour, RTC_HOURS); - maple_clock_write(mon, RTC_MONTH); - maple_clock_write(mday, RTC_DAY_OF_MONTH); - maple_clock_write(year, RTC_YEAR); - - /* The following flags have to be released exactly in this order, - * otherwise the DS12887 (popular MC146818A clone with integrated - * battery and quartz) will not reset the oscillator and will not - * update precisely 500 ms later. You won't find this mentioned in - * the Dallas Semiconductor data sheets, but who believes data - * sheets anyway ... 
-- Markus Kuhn - */ - maple_clock_write(save_control, RTC_CONTROL); - maple_clock_write(save_freq_select, RTC_FREQ_SELECT); - - spin_unlock(&rtc_lock); - - return 0; -} - -static struct resource rtc_iores = { - .name = "rtc", - .flags = IORESOURCE_IO | IORESOURCE_BUSY, -}; - -time64_t __init maple_get_boot_time(void) -{ - struct rtc_time tm; - struct device_node *rtcs; - - rtcs = of_find_compatible_node(NULL, "rtc", "pnpPNP,b00"); - if (rtcs) { - struct resource r; - if (of_address_to_resource(rtcs, 0, &r)) { - printk(KERN_EMERG "Maple: Unable to translate RTC" - " address\n"); - goto bail; - } - if (!(r.flags & IORESOURCE_IO)) { - printk(KERN_EMERG "Maple: RTC address isn't PIO!\n"); - goto bail; - } - maple_rtc_addr = r.start; - printk(KERN_INFO "Maple: Found RTC at IO 0x%x\n", - maple_rtc_addr); - } - bail: - of_node_put(rtcs); - if (maple_rtc_addr == 0) { - maple_rtc_addr = RTC_PORT(0); /* legacy address */ - printk(KERN_INFO "Maple: No device node for RTC, assuming " - "legacy address (0x%x)\n", maple_rtc_addr); - } - - rtc_iores.start = maple_rtc_addr; - rtc_iores.end = maple_rtc_addr + 7; - request_resource(&ioport_resource, &rtc_iores); - - maple_get_rtc_time(&tm); - return rtc_tm_to_time64(&tm); -} - diff --git a/arch/powerpc/platforms/microwatt/Kconfig b/arch/powerpc/platforms/microwatt/Kconfig index 6af443a1db99..cb2aff635bb0 100644 --- a/arch/powerpc/platforms/microwatt/Kconfig +++ b/arch/powerpc/platforms/microwatt/Kconfig @@ -1,11 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 config PPC_MICROWATT - depends on PPC_BOOK3S_64 && !SMP + depends on PPC_BOOK3S_64 bool "Microwatt SoC platform" select PPC_XICS select PPC_ICS_NATIVE select PPC_ICP_NATIVE select PPC_UDBG_16550 + select COMMON_CLK help This option enables support for FPGA-based Microwatt implementations. 
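The maple time.c code removed above follows the classic MC146818 RTC access pattern: re-read the seconds register until two consecutive reads agree (so no field is sampled while the clock is mid-update), then convert from BCD unless the chip is in binary data mode. Below is a minimal stand-alone sketch of that pattern; the fake register file, rtc_read() helper, and the 0x42 sample value are illustrative stand-ins for the kernel's outb_p()/inb_p() port accessors in maple_clock_read(), not the kernel implementation, while the register offsets mirror the mc146818rtc.h constants.

#include <stdio.h>

#define RTC_SECONDS   0x00
#define RTC_CONTROL   0x0b
#define RTC_DM_BINARY 0x04

/* Fake register file; the real driver reaches the chip with
 * outb_p(addr, port) followed by inb_p(port + 1). */
static unsigned char rtc_regs[128] = {
	[RTC_SECONDS] = 0x42,	/* BCD encoding of 42 seconds */
};

static unsigned char rtc_read(int addr)
{
	return rtc_regs[addr];
}

/* Same conversion the kernel's bcd2bin() performs. */
static unsigned char bcd2bin(unsigned char v)
{
	return (v & 0x0f) + (v >> 4) * 10;
}

static int rtc_read_seconds(void)
{
	unsigned char sec;

	/* Re-read until two consecutive reads agree, so we never use a
	 * value sampled while the clock was updating itself. */
	do {
		sec = rtc_read(RTC_SECONDS);
	} while (sec != rtc_read(RTC_SECONDS));

	/* Registers hold BCD unless the chip is in binary data mode. */
	if (!(rtc_read(RTC_CONTROL) & RTC_DM_BINARY))
		sec = bcd2bin(sec);

	return sec;
}

int main(void)
{
	printf("seconds: %d\n", rtc_read_seconds());	/* prints 42 */
	return 0;
}

The double-read loop is cheap because the seconds register changes at most once per second; it is the same trick the removed maple_get_rtc_time() applies across all time fields before deciding whether bcd2bin() conversion is needed.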
diff --git a/arch/powerpc/platforms/microwatt/Makefile b/arch/powerpc/platforms/microwatt/Makefile index 116d6d3ad3f0..d973b2ab4042 100644 --- a/arch/powerpc/platforms/microwatt/Makefile +++ b/arch/powerpc/platforms/microwatt/Makefile @@ -1 +1,2 @@ obj-y += setup.o rng.o +obj-$(CONFIG_SMP) += smp.o diff --git a/arch/powerpc/platforms/microwatt/microwatt.h b/arch/powerpc/platforms/microwatt/microwatt.h index 335417e95e66..891aa2800768 100644 --- a/arch/powerpc/platforms/microwatt/microwatt.h +++ b/arch/powerpc/platforms/microwatt/microwatt.h @@ -3,5 +3,6 @@ #define _MICROWATT_H void microwatt_rng_init(void); +void microwatt_init_smp(void); #endif /* _MICROWATT_H */ diff --git a/arch/powerpc/platforms/microwatt/setup.c b/arch/powerpc/platforms/microwatt/setup.c index 5e1c0997170d..6af2ccef736c 100644 --- a/arch/powerpc/platforms/microwatt/setup.c +++ b/arch/powerpc/platforms/microwatt/setup.c @@ -29,15 +29,33 @@ static int __init microwatt_populate(void) } machine_arch_initcall(microwatt, microwatt_populate); +static int __init microwatt_probe(void) +{ + /* Main reason for having this is to start the other CPU(s) */ + if (IS_ENABLED(CONFIG_SMP)) + microwatt_init_smp(); + return 1; +} + static void __init microwatt_setup_arch(void) { microwatt_rng_init(); } +static void microwatt_idle(void) +{ + if (!prep_irq_for_idle_irqsoff()) + return; + + __asm__ __volatile__ ("wait"); +} + define_machine(microwatt) { .name = "microwatt", .compatible = "microwatt-soc", + .probe = microwatt_probe, .init_IRQ = microwatt_init_IRQ, .setup_arch = microwatt_setup_arch, .progress = udbg_progress, + .power_save = microwatt_idle, }; diff --git a/arch/powerpc/platforms/microwatt/smp.c b/arch/powerpc/platforms/microwatt/smp.c new file mode 100644 index 000000000000..7dbf2ca73d47 --- /dev/null +++ b/arch/powerpc/platforms/microwatt/smp.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* + * SMP support functions for Microwatt + * Copyright 2025 Paul Mackerras <paulus@ozlabs.org> + */ + +#include <linux/kernel.h> +#include <linux/smp.h> +#include <linux/io.h> +#include <asm/early_ioremap.h> +#include <asm/ppc-opcode.h> +#include <asm/reg.h> +#include <asm/smp.h> +#include <asm/xics.h> + +#include "microwatt.h" + +static void __init microwatt_smp_probe(void) +{ + xics_smp_probe(); +} + +static void microwatt_smp_setup_cpu(int cpu) +{ + if (cpu != 0) + xics_setup_cpu(); +} + +static struct smp_ops_t microwatt_smp_ops = { + .probe = microwatt_smp_probe, + .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */ + .kick_cpu = smp_generic_kick_cpu, + .setup_cpu = microwatt_smp_setup_cpu, +}; + +/* XXX get from device tree */ +#define SYSCON_BASE 0xc0000000 +#define SYSCON_LENGTH 0x100 + +#define SYSCON_CPU_CTRL 0x58 + +void __init microwatt_init_smp(void) +{ + volatile unsigned char __iomem *syscon; + int ncpus; + int timeout; + + syscon = early_ioremap(SYSCON_BASE, SYSCON_LENGTH); + if (syscon == NULL) { + pr_err("Failed to map SYSCON\n"); + return; + } + ncpus = (readl(syscon + SYSCON_CPU_CTRL) >> 8) & 0xff; + if (ncpus < 2) + goto out; + + smp_ops = &microwatt_smp_ops; + + /* + * Write two instructions at location 0: + * mfspr r3, PIR + * b __secondary_hold + */ + *(unsigned int *)KERNELBASE = PPC_RAW_MFSPR(3, SPRN_PIR); + *(unsigned int *)(KERNELBASE+4) = PPC_RAW_BRANCH(&__secondary_hold - (char *)(KERNELBASE+4)); + + /* enable the other CPUs, they start at location 0 */ + writel((1ul << ncpus) - 1, syscon + SYSCON_CPU_CTRL); + + timeout = 10000; + while (!__secondary_hold_acknowledge) { + if (--timeout 
== 0) + break; + barrier(); + } + + out: + early_iounmap((void *)syscon, SYSCON_LENGTH); +} diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index 4e983af32949..e4538d471256 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c @@ -285,7 +285,7 @@ MODULE_DEVICE_TABLE(of, gpio_mdio_match); static struct platform_driver gpio_mdio_driver = { .probe = gpio_mdio_probe, - .remove_new = gpio_mdio_remove, + .remove = gpio_mdio_remove, .driver = { .name = "gpio-mdio-bitbang", .of_match_table = gpio_mdio_match, diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h index 018c30665e1b..6f6743b8e48d 100644 --- a/arch/powerpc/platforms/pasemi/pasemi.h +++ b/arch/powerpc/platforms/pasemi/pasemi.h @@ -5,7 +5,6 @@ extern time64_t pas_get_boot_time(void); extern void pas_pci_init(void); struct pci_dev; -extern void pas_pci_irq_fixup(struct pci_dev *dev); extern void pas_pci_dma_dev_setup(struct pci_dev *dev); void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset); diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index 0761d98e5be3..d03b41336901 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -228,7 +228,7 @@ static void __init nemo_init_IRQ(struct mpic *mpic) irq_set_chained_handler(gpio_virq, sb600_8259_cascade); mpic_unmask_irq(irq_get_irq_data(gpio_virq)); - irq_set_default_host(mpic->irqhost); + irq_set_default_domain(mpic->irqhost); } #else diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c index 12bc01353bd3..79741370c40c 100644 --- a/arch/powerpc/platforms/powermac/backlight.c +++ b/arch/powerpc/platforms/powermac/backlight.c @@ -57,18 +57,10 @@ struct backlight_device *pmac_backlight; int pmac_has_backlight_type(const char *type) { struct device_node* bk_node = of_find_node_by_name(NULL, "backlight"); + int i = of_property_match_string(bk_node, "backlight-control", type); - if (bk_node) { - const char *prop = of_get_property(bk_node, - "backlight-control", NULL); - if (prop && strncmp(prop, type, strlen(type)) == 0) { - of_node_put(bk_node); - return 1; - } - of_node_put(bk_node); - } - - return 0; + of_node_put(bk_node); + return i >= 0; } static void pmac_backlight_key_worker(struct work_struct *work) diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index c097d591670e..02474e27df9b 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -347,7 +347,7 @@ static irqreturn_t kw_i2c_irq(int irq, void *dev_id) unsigned long flags; spin_lock_irqsave(&host->lock, flags); - del_timer(&host->timeout_timer); + timer_delete(&host->timeout_timer); kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); if (host->state != state_idle) { host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; @@ -359,7 +359,8 @@ static irqreturn_t kw_i2c_irq(int irq, void *dev_id) static void kw_i2c_timeout(struct timer_list *t) { - struct pmac_i2c_host_kw *host = from_timer(host, t, timeout_timer); + struct pmac_i2c_host_kw *host = timer_container_of(host, t, + timeout_timer); unsigned long flags; spin_lock_irqsave(&host->lock, flags); diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index fe2e0249cbc2..a112d26185a0 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ 
b/arch/powerpc/platforms/powermac/nvram.c @@ -514,10 +514,7 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr) printk(KERN_ERR "nvram: no address\n"); return -EINVAL; } - nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES); - if (!nvram_image) - panic("%s: Failed to allocate %u bytes\n", __func__, - NVRAM_SIZE); + nvram_image = memblock_alloc_or_panic(NVRAM_SIZE, SMP_CACHE_BYTES); nvram_data = ioremap(addr, NVRAM_SIZE*2); nvram_naddrs = 1; /* Make sure we get the correct case */ diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index 085e0ad20eba..8253de737373 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c @@ -313,7 +313,7 @@ static void __init uninorth_install_pfunc(void) /* * Install handlers for the hwclock child if any */ - for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;) + for_each_child_of_node(uninorth_node, np) if (of_node_name_eq(np, "hw-clock")) { unin_hwclock = np; break; diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 7135ea1d7db6..c37783a03d25 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -2,7 +2,7 @@ /* * Support for the interrupt controllers found on Power Macintosh, * currently Apple's "Grand Central" interrupt controller in all - * it's incarnations. OpenPIC support used on newer machines is + * its incarnations. OpenPIC support used on newer machines is * in a separate file * * Copyright (C) 1997 Paul Mackerras (paulus@samba.org) @@ -250,7 +250,7 @@ static unsigned int pmac_pic_get_irq(void) raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); if (unlikely(irq < 0)) return 0; - return irq_linear_revmap(pmac_pic_host, irq); + return irq_find_mapping(pmac_pic_host, irq); } static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node, @@ -327,10 +327,11 @@ static void __init pmac_pic_probe_oldstyle(void) /* * Allocate an irq host */ - pmac_pic_host = irq_domain_add_linear(master, max_irqs, - &pmac_pic_host_ops, NULL); + pmac_pic_host = irq_domain_create_linear(of_fwnode_handle(master), + max_irqs, + &pmac_pic_host_ops, NULL); BUG_ON(pmac_pic_host == NULL); - irq_set_default_host(pmac_pic_host); + irq_set_default_domain(pmac_pic_host); /* Get addresses of first controller if we have a node for it */ BUG_ON(of_address_to_resource(master, 0, &r)); diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 6de1cd5d8a58..e119ced05d10 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -45,6 +45,7 @@ #include <linux/root_dev.h> #include <linux/bitops.h> #include <linux/suspend.h> +#include <linux/string_choices.h> #include <linux/of.h> #include <linux/of_platform.h> @@ -238,8 +239,7 @@ static void __init l2cr_init(void) _set_L2CR(0); _set_L2CR(*l2cr); pr_info("L2CR overridden (0x%x), backside cache is %s\n", - *l2cr, ((*l2cr) & 0x80000000) ? - "enabled" : "disabled"); + *l2cr, str_enabled_disabled((*l2cr) & 0x80000000)); } of_node_put(np); break; diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S index d497a60003d2..822ed70cdcbf 100644 --- a/arch/powerpc/platforms/powermac/sleep.S +++ b/arch/powerpc/platforms/powermac/sleep.S @@ -176,7 +176,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) * memory location containing the PC to resume from * at address 0. 
* - On Core99, we must store the wakeup vector at - * address 0x80 and eventually it's parameters + * address 0x80 and eventually its parameters * at address 0x84. I've have some trouble with those * parameters however and I no longer use them. */ diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 15644be31990..88e92af8acf9 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -35,7 +35,7 @@ #include <asm/ptrace.h> #include <linux/atomic.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/irq.h> #include <asm/page.h> #include <asm/sections.h> @@ -190,7 +190,7 @@ static int __init psurge_secondary_ipi_init(void) { int rc = -ENOMEM; - psurge_host = irq_domain_add_nomap(NULL, ~0, &psurge_host_ops, NULL); + psurge_host = irq_domain_create_nomap(NULL, ~0, &psurge_host_ops, NULL); if (psurge_host) psurge_secondary_virq = irq_create_direct_mapping(psurge_host); @@ -827,7 +827,7 @@ static int smp_core99_kick_cpu(int nr) mdelay(1); /* Restore our exception vector */ - patch_instruction(vector, ppc_inst(save_vector)); + patch_uint(vector, save_vector); local_irq_restore(flags); if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index 8633891b7aa5..b4426a35aca3 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/time.h> @@ -77,7 +78,7 @@ long __init pmac_time_init(void) delta |= 0xFF000000UL; dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0); printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60, - dst ? 
"on" : "off"); + str_on_off(dst)); #endif return delta; } diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig index 70a46acc70d6..95d7ba73d43d 100644 --- a/arch/powerpc/platforms/powernv/Kconfig +++ b/arch/powerpc/platforms/powernv/Kconfig @@ -17,6 +17,7 @@ config PPC_POWERNV select MMU_NOTIFIER select FORCE_SMP select ARCH_SUPPORTS_PER_VMA_LOCK + select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU default y config OPAL_PRD diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index 19f0fc5c6f1b..9e5d0c847ee2 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -21,7 +21,6 @@ obj-$(CONFIG_PRESERVE_FA_DUMP) += opal-fadump.o obj-$(CONFIG_OPAL_CORE) += opal-core.o obj-$(CONFIG_PCI) += pci.o pci-ioda.o pci-ioda-tce.o obj-$(CONFIG_PCI_IOV) += pci-sriov.o -obj-$(CONFIG_CXL_BASE) += pci-cxl.o obj-$(CONFIG_EEH) += eeh-powernv.o obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o obj-$(CONFIG_OPAL_PRD) += opal-prd.o diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index af3a5d37a149..db3370d1673c 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -99,7 +99,6 @@ static ssize_t pnv_eeh_ei_write(struct file *filp, static const struct file_operations pnv_eeh_ei_fops = { .open = simple_open, - .llseek = no_llseek, .write = pnv_eeh_ei_write, }; @@ -860,7 +859,7 @@ static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option) int64_t rc; /* Hot reset to the bus if firmware cannot handle */ - if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL)) + if (!dn || !of_property_present(dn, "ibm,reset-by-firmware")) return __pnv_eeh_bridge_reset(pdev, option); pr_debug("%s: FW reset PCI bus %04x:%02x with option %d\n", diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index ad41dffe4d92..d98b933e4984 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -18,7 +18,7 @@ #include <asm/opal.h> #include <asm/cputhreads.h> #include <asm/cpuidle.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/smp.h> #include <asm/runlatch.h> #include <asm/dbell.h> diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 877720c64515..2ea30b343354 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -48,11 +48,15 @@ static ssize_t memtrace_read(struct file *filp, char __user *ubuf, static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma) { struct memtrace_entry *ent = filp->private_data; + unsigned long ent_nrpages = ent->size >> PAGE_SHIFT; + unsigned long vma_nrpages = vma_pages(vma); - if (ent->size < vma->vm_end - vma->vm_start) + /* The requested page offset should be within object's page count */ + if (vma->vm_pgoff >= ent_nrpages) return -EINVAL; - if (vma->vm_pgoff << PAGE_SHIFT >= ent->size) + /* The requested mapping range should remain within the bounds */ + if (vma_nrpages > ent_nrpages - vma->vm_pgoff) return -EINVAL; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -88,26 +92,6 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, } } -static void memtrace_clear_range(unsigned long start_pfn, - unsigned long nr_pages) -{ - unsigned long pfn; - - /* As HIGHMEM does not apply, use clear_page() 
directly. */ - for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) { - if (IS_ALIGNED(pfn, PAGES_PER_SECTION)) - cond_resched(); - clear_page(__va(PFN_PHYS(pfn))); - } - /* - * Before we go ahead and use this range as cache inhibited range - * flush the cache. - */ - flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn), - (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), - FLUSH_CHUNK_SIZE); -} - static u64 memtrace_alloc_node(u32 nid, u64 size) { const unsigned long nr_pages = PHYS_PFN(size); @@ -119,17 +103,18 @@ static u64 memtrace_alloc_node(u32 nid, u64 size) * by alloc_contig_pages(). */ page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE | - __GFP_NOWARN, nid, NULL); + __GFP_NOWARN | __GFP_ZERO, nid, NULL); if (!page) return 0; start_pfn = page_to_pfn(page); /* - * Clear the range while we still have a linear mapping. - * - * TODO: use __GFP_ZERO with alloc_contig_pages() once supported. + * Before we go ahead and use this range as cache inhibited range + * flush the cache. */ - memtrace_clear_range(start_pfn, nr_pages); + flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn), + (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), + FLUSH_CHUNK_SIZE); /* * Set pages PageOffline(), to indicate that nobody (e.g., hibernation, diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c index c9a9b759cc92..e652da8f986f 100644 --- a/arch/powerpc/platforms/powernv/opal-core.c +++ b/arch/powerpc/platforms/powernv/opal-core.c @@ -149,7 +149,7 @@ static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf, /* end of vector */ bufp[idx++] = cpu_to_be64(AT_NULL); - buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_AUXV, + buf = append_elf64_note(buf, NN_AUXV, NT_AUXV, oc_conf->auxv_buf, AUXV_DESC_SZ); return buf; } @@ -159,7 +159,7 @@ static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf, * Returns number of bytes read on success, -errno on failure. */ static ssize_t read_opalcore(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, char *to, + const struct bin_attribute *bin_attr, char *to, loff_t pos, size_t count) { struct opalcore *m; @@ -206,9 +206,9 @@ static ssize_t read_opalcore(struct file *file, struct kobject *kobj, return (tpos - pos); } -static struct bin_attribute opal_core_attr = { +static struct bin_attribute opal_core_attr __ro_after_init = { .attr = {.name = "core", .mode = 0400}, - .read = read_opalcore + .read_new = read_opalcore }; /* @@ -252,7 +252,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf) * crashing CPU's prstatus. */ first_cpu_note = buf; - buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS, + buf = append_elf64_note(buf, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); for (i = 0; i < oc_conf->num_cpus; i++, bufp += size_per_thread) { @@ -279,7 +279,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf) fill_prstatus(&prstatus, thread_pir, ®s); if (thread_pir != oc_conf->crashing_cpu) { - buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, + buf = append_elf64_note(buf, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); } else { @@ -287,7 +287,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf) * Add crashing CPU as the first NT_PRSTATUS note for * GDB to process the core file appropriately. 
*/ - append_elf64_note(first_cpu_note, CRASH_CORE_NOTE_NAME, + append_elf64_note(first_cpu_note, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); } @@ -599,7 +599,7 @@ static struct attribute *mpipl_attr[] = { NULL, }; -static struct bin_attribute *mpipl_bin_attr[] = { +static const struct bin_attribute *const mpipl_bin_attr[] = { &opal_core_attr, NULL, @@ -607,7 +607,7 @@ static struct bin_attribute *mpipl_bin_attr[] = { static const struct attribute_group mpipl_group = { .attrs = mpipl_attr, - .bin_attrs = mpipl_bin_attr, + .bin_attrs_new = mpipl_bin_attr, }; static int __init opalcore_init(void) diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c index 16c5860f1372..27e25693cf39 100644 --- a/arch/powerpc/platforms/powernv/opal-dump.c +++ b/arch/powerpc/platforms/powernv/opal-dump.c @@ -210,7 +210,7 @@ static struct attribute *dump_default_attrs[] = { }; ATTRIBUTE_GROUPS(dump_default); -static struct kobj_type dump_ktype = { +static const struct kobj_type dump_ktype = { .sysfs_ops = &dump_sysfs_ops, .release = &dump_release, .default_groups = dump_default_groups, @@ -286,7 +286,7 @@ out: } static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { ssize_t rc; @@ -342,7 +342,7 @@ static void create_dump_obj(uint32_t id, size_t size, uint32_t type) dump->dump_attr.attr.name = "dump"; dump->dump_attr.attr.mode = 0400; dump->dump_attr.size = size; - dump->dump_attr.read = dump_attr_read; + dump->dump_attr.read_new = dump_attr_read; dump->id = id; dump->size = size; diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c index 554fdd7f88b8..de33f354e9fd 100644 --- a/arch/powerpc/platforms/powernv/opal-elog.c +++ b/arch/powerpc/platforms/powernv/opal-elog.c @@ -146,7 +146,7 @@ static struct attribute *elog_default_attrs[] = { }; ATTRIBUTE_GROUPS(elog_default); -static struct kobj_type elog_ktype = { +static const struct kobj_type elog_ktype = { .sysfs_ops = &elog_sysfs_ops, .release = &elog_release, .default_groups = elog_default_groups, @@ -156,7 +156,7 @@ static struct kobj_type elog_ktype = { #define OPAL_MAX_ERRLOG_SIZE 16384 static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { int opal_rc; @@ -203,7 +203,7 @@ static void create_elog_obj(uint64_t id, size_t size, uint64_t type) elog->raw_attr.attr.name = "raw"; elog->raw_attr.attr.mode = 0400; elog->raw_attr.size = size; - elog->raw_attr.read = raw_attr_read; + elog->raw_attr.read_new = raw_attr_read; elog->id = id; elog->size = size; diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c index 964f464b1b0e..c9c1dfb35464 100644 --- a/arch/powerpc/platforms/powernv/opal-fadump.c +++ b/arch/powerpc/platforms/powernv/opal-fadump.c @@ -513,8 +513,8 @@ out: final_note(note_buf); pr_debug("Updating elfcore header (%llx) with cpu notes\n", - fdh->elfcorehdr_addr); - fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr)); + fadump_conf->elfcorehdr_addr); + fadump_update_elfcore_header((char *)fadump_conf->elfcorehdr_addr); return 0; } @@ -526,12 +526,7 @@ static int __init opal_fadump_process(struct fw_dump *fadump_conf) if (!opal_fdm_active || !fadump_conf->fadumphdr_addr) return rc; - /* Validate the fadump crash info header */ fdh = 
__va(fadump_conf->fadumphdr_addr); - if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) { - pr_err("Crash info header is not valid.\n"); - return rc; - } #ifdef CONFIG_OPAL_CORE /* @@ -545,18 +540,7 @@ static int __init opal_fadump_process(struct fw_dump *fadump_conf) kernel_initiated = true; #endif - rc = opal_fadump_build_cpu_notes(fadump_conf, fdh); - if (rc) - return rc; - - /* - * We are done validating dump info and elfcore header is now ready - * to be exported. set elfcorehdr_addr so that vmcore module will - * export the elfcore header through '/proc/vmcore'. - */ - elfcorehdr_addr = fdh->elfcorehdr_addr; - - return rc; + return opal_fadump_build_cpu_notes(fadump_conf, fdh); } static void opal_fadump_region_show(struct fw_dump *fadump_conf, @@ -615,6 +599,12 @@ static void opal_fadump_trigger(struct fadump_crash_info_header *fdh, pr_emerg("No backend support for MPIPL!\n"); } +/* FADUMP_MAX_MEM_REGS or lower */ +static int opal_fadump_max_boot_mem_rgns(void) +{ + return FADUMP_MAX_MEM_REGS; +} + static struct fadump_ops opal_fadump_ops = { .fadump_init_mem_struct = opal_fadump_init_mem_struct, .fadump_get_metadata_size = opal_fadump_get_metadata_size, @@ -627,6 +617,7 @@ static struct fadump_ops opal_fadump_ops = { .fadump_process = opal_fadump_process, .fadump_region_show = opal_fadump_region_show, .fadump_trigger = opal_fadump_trigger, + .fadump_max_boot_mem_rgns = opal_fadump_max_boot_mem_rgns, }; void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) @@ -674,8 +665,10 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) } } - fadump_conf->ops = &opal_fadump_ops; - fadump_conf->fadump_supported = 1; + fadump_conf->ops = &opal_fadump_ops; + fadump_conf->fadump_supported = 1; + /* TODO: Add support to pass additional parameters */ + fadump_conf->param_area_supported = 0; /* * Firmware supports 32-bit field for size. Align it to PAGE_SIZE diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c index d5ea04e8e4c5..fd8c8621e973 100644 --- a/arch/powerpc/platforms/powernv/opal-flash.c +++ b/arch/powerpc/platforms/powernv/opal-flash.c @@ -432,7 +432,7 @@ static int alloc_image_buf(char *buffer, size_t count) * and pre-allocate required memory. */ static ssize_t image_data_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { int rc; @@ -493,7 +493,7 @@ out: static const struct bin_attribute image_data_attr = { .attr = {.name = "image", .mode = 0200}, .size = MAX_IMAGE_SIZE, /* Limit image size */ - .write = image_data_write, + .write_new = image_data_write, }; static struct kobj_attribute validate_attribute = diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index 56a1f7ce78d2..e180bd8e1400 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c @@ -191,7 +191,8 @@ int __init opal_event_init(void) * fall back to the legacy method (opal_event_request(...)) * anyway. 
*/ dn = of_find_compatible_node(NULL, NULL, "ibm,opal-event"); - opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS, + opal_event_irqchip.domain = irq_domain_create_linear(of_fwnode_handle(dn), + MAX_NUM_EVENTS, &opal_event_domain_ops, &opal_event_irqchip); of_node_put(dn); if (!opal_event_irqchip.domain) { @@ -282,6 +283,7 @@ int __init opal_event_init(void) name, NULL); if (rc) { pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start); + kfree(name); continue; } } diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c index 6c3bc4b4da98..bb4218fa796e 100644 --- a/arch/powerpc/platforms/powernv/opal-kmsg.c +++ b/arch/powerpc/platforms/powernv/opal-kmsg.c @@ -20,13 +20,13 @@ * message, it just ensures that OPAL completely flushes the console buffer. */ static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper, - enum kmsg_dump_reason reason) + struct kmsg_dump_detail *detail) { /* * Outside of a panic context the pollers will continue to run, * so we don't need to do any special flushing. */ - if (reason != KMSG_DUMP_PANIC) + if (detail->reason != KMSG_DUMP_PANIC) return; opal_flush_console(0); diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index a16f07cdab26..8a7f39e106bd 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -393,7 +393,7 @@ void __init opal_lpc_init(void) for_each_compatible_node(np, NULL, "ibm,power8-lpc") { if (!of_device_is_available(np)) continue; - if (!of_get_property(np, "primary", NULL)) + if (!of_property_present(np, "primary")) continue; opal_lpc_chip_id = of_get_ibm_chip_id(np); of_node_put(np); diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c index 22d6efe17b0d..f1988d0ab45c 100644 --- a/arch/powerpc/platforms/powernv/opal-msglog.c +++ b/arch/powerpc/platforms/powernv/opal-msglog.c @@ -94,15 +94,15 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count) } static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, char *to, + const struct bin_attribute *bin_attr, char *to, loff_t pos, size_t count) { return opal_msglog_copy(to, pos, count); } -static struct bin_attribute opal_msglog_attr = { +static struct bin_attribute opal_msglog_attr __ro_after_init = { .attr = {.name = "msglog", .mode = 0400}, - .read = opal_msglog_read + .read_new = opal_msglog_read }; struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name) diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index 24f04f20d3e8..dc246ed4b7b4 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c @@ -443,7 +443,7 @@ static struct platform_driver opal_prd_driver = { .of_match_table = opal_prd_match, }, .probe = opal_prd_probe, - .remove_new = opal_prd_remove, + .remove = opal_prd_remove, }; module_platform_driver(opal_prd_driver); diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 45dd77e3ccf6..9ec265fcaff4 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -180,10 +180,7 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node, /* * Allocate a buffer to hold the MC recoverable ranges. 
*/ - mc_recoverable_range = memblock_alloc(size, __alignof__(u64)); - if (!mc_recoverable_range) - panic("%s: Failed to allocate %u bytes align=0x%lx\n", - __func__, size, __alignof__(u64)); + mc_recoverable_range = memblock_alloc_or_panic(size, __alignof__(u64)); for (i = 0; i < mc_recoverable_range_len; i++) { mc_recoverable_range[i].start_addr = @@ -792,14 +789,6 @@ static int __init opal_sysfs_init(void) return 0; } -static ssize_t export_attr_read(struct file *fp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, - loff_t off, size_t count) -{ - return memory_read_from_buffer(buf, count, &off, bin_attr->private, - bin_attr->size); -} - static int opal_add_one_export(struct kobject *parent, const char *export_name, struct device_node *np, const char *prop_name) { @@ -826,7 +815,7 @@ static int opal_add_one_export(struct kobject *parent, const char *export_name, sysfs_bin_attr_init(attr); attr->attr.name = name; attr->attr.mode = 0400; - attr->read = export_attr_read; + attr->read_new = sysfs_bin_attr_simple_read; attr->private = __va(vals[0]); attr->size = vals[1]; diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c deleted file mode 100644 index 7e419de71db8..000000000000 --- a/arch/powerpc/platforms/powernv/pci-cxl.c +++ /dev/null @@ -1,153 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014-2016 IBM Corp. - */ - -#include <linux/module.h> -#include <misc/cxl-base.h> -#include <asm/pnv-pci.h> -#include <asm/opal.h> - -#include "pci.h" - -int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - struct pnv_ioda_pe *pe; - int rc; - - pe = pnv_ioda_get_pe(dev); - if (!pe) - return -ENODEV; - - pe_info(pe, "Switching PHB to CXL\n"); - - rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number); - if (rc == OPAL_UNSUPPORTED) - dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n"); - else if (rc) - dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc); - - return rc; -} -EXPORT_SYMBOL(pnv_phb_to_cxl_mode); - -/* Find PHB for cxl dev and allocate MSI hwirqs? 
- * Returns the absolute hardware IRQ number - */ -int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num); - - if (hwirq < 0) { - dev_warn(&dev->dev, "Failed to find a free MSI\n"); - return -ENOSPC; - } - - return phb->msi_base + hwirq; -} -EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs); - -void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - - msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num); -} -EXPORT_SYMBOL(pnv_cxl_release_hwirqs); - -void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs, - struct pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - int i, hwirq; - - for (i = 1; i < CXL_IRQ_RANGES; i++) { - if (!irqs->range[i]) - continue; - pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n", - i, irqs->offset[i], - irqs->range[i]); - hwirq = irqs->offset[i] - phb->msi_base; - msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, - irqs->range[i]); - } -} -EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges); - -int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs, - struct pci_dev *dev, int num) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - int i, hwirq, try; - - memset(irqs, 0, sizeof(struct cxl_irq_ranges)); - - /* 0 is reserved for the multiplexed PSL DSI interrupt */ - for (i = 1; i < CXL_IRQ_RANGES && num; i++) { - try = num; - while (try) { - hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try); - if (hwirq >= 0) - break; - try /= 2; - } - if (!try) - goto fail; - - irqs->offset[i] = phb->msi_base + hwirq; - irqs->range[i] = try; - pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n", - i, irqs->offset[i], irqs->range[i]); - num -= try; - } - if (num) - goto fail; - - return 0; -fail: - pnv_cxl_release_hwirq_ranges(irqs, dev); - return -ENOSPC; -} -EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges); - -int pnv_cxl_get_irq_count(struct pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - - return phb->msi_bmp.irq_count; -} -EXPORT_SYMBOL(pnv_cxl_get_irq_count); - -int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq, - unsigned int virq) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - unsigned int xive_num = hwirq - phb->msi_base; - struct pnv_ioda_pe *pe; - int rc; - - if (!(pe = pnv_ioda_get_pe(dev))) - return -ENODEV; - - /* Assign XIVE to PE */ - rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); - if (rc) { - pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x " - "hwirq 0x%x XIVE 0x%x PE\n", - pci_name(dev), rc, phb->msi_base, hwirq, xive_num); - return -EIO; - } - pnv_set_msi_irq_chip(phb, virq); - - return 0; -} -EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 23f5b5093ec1..d8ccf2c9b98a 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -39,8 +39,6 @@ #include <asm/mmzone.h> #include <asm/xive.h> -#include <misc/cxl-base.h> - #include "powernv.h" #include "pci.h" #include "../../../../drivers/pci/pci.h" @@ -1537,7 +1535,8 @@ static void 
pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) } } -static long pnv_ioda2_take_ownership(struct iommu_table_group *table_group) +static long pnv_ioda2_take_ownership(struct iommu_table_group *table_group, + struct device *dev __maybe_unused) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); @@ -1562,7 +1561,8 @@ static long pnv_ioda2_take_ownership(struct iommu_table_group *table_group) return 0; } -static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group) +static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group, + struct device *dev __maybe_unused) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); @@ -1634,47 +1634,6 @@ int64_t pnv_opal_pci_msi_eoi(struct irq_data *d) return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq); } -/* - * The IRQ data is mapped in the XICS domain, with OPAL HW IRQ numbers - */ -static void pnv_ioda2_msi_eoi(struct irq_data *d) -{ - int64_t rc; - unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); - struct pci_controller *hose = irq_data_get_irq_chip_data(d); - struct pnv_phb *phb = hose->private_data; - - rc = opal_pci_msi_eoi(phb->opal_id, hw_irq); - WARN_ON_ONCE(rc); - - icp_native_eoi(d); -} - -/* P8/CXL only */ -void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq) -{ - struct irq_data *idata; - struct irq_chip *ichip; - - /* The MSI EOI OPAL call is only needed on PHB3 */ - if (phb->model != PNV_PHB_MODEL_PHB3) - return; - - if (!phb->ioda.irq_chip_init) { - /* - * First time we setup an MSI IRQ, we need to setup the - * corresponding IRQ chip to route correctly. - */ - idata = irq_get_irq_data(virq); - ichip = irq_data_get_irq_chip(idata); - phb->ioda.irq_chip_init = 1; - phb->ioda.irq_chip = *ichip; - phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi; - } - irq_set_chip(virq, &phb->ioda.irq_chip); - irq_set_chip_data(virq, phb->hose); -} - static struct irq_chip pnv_pci_msi_irq_chip; /* @@ -1922,7 +1881,7 @@ static const struct irq_domain_ops pnv_irq_domain_ops = { static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned int count) { struct pnv_phb *phb = hose->private_data; - struct irq_domain *parent = irq_get_default_host(); + struct irq_domain *parent = irq_get_default_domain(); hose->fwnode = irq_domain_alloc_named_id_fwnode("PNV-MSI", phb->opal_id); if (!hose->fwnode) @@ -1938,7 +1897,7 @@ static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned return -ENOMEM; } - hose->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(hose->dn), + hose->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(hose->dn), &pnv_msi_domain_info, hose->dev_domain); if (!hose->msi_domain) { diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 59882da3e742..cc7b1dd54ac6 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -238,7 +238,7 @@ void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev) } else if (pdev->is_physfn) { /* * For PFs adjust their allocated IOV resources to match what - * the PHB can support using it's M64 BAR table. + * the PHB can support using its M64 BAR table. 
*/ pnv_pci_ioda_fixup_iov_resources(pdev); } @@ -658,7 +658,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) list_add_tail(&pe->list, &phb->ioda.pe_list); mutex_unlock(&phb->ioda.pe_list_mutex); - /* associate this pe to it's pdn */ + /* associate this pe to its pdn */ list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) { if (vf_pdn->busno == vf_bus && vf_pdn->devfn == vf_devfn) { diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 35f566aa0424..b2c1da025410 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -14,7 +14,6 @@ #include <linux/io.h> #include <linux/msi.h> #include <linux/iommu.h> -#include <linux/sched/mm.h> #include <asm/sections.h> #include <asm/io.h> @@ -33,8 +32,6 @@ #include "powernv.h" #include "pci.h" -static DEFINE_MUTEX(tunnel_mutex); - int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id) { struct device_node *node = np; @@ -744,64 +741,6 @@ struct iommu_table *pnv_pci_table_alloc(int nid) return tbl; } -struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - - return of_node_get(hose->dn); -} -EXPORT_SYMBOL(pnv_pci_get_phb_node); - -int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable) -{ - struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); - u64 tunnel_bar; - __be64 val; - int rc; - - if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR)) - return -ENXIO; - if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR)) - return -ENXIO; - - mutex_lock(&tunnel_mutex); - rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val); - if (rc != OPAL_SUCCESS) { - rc = -EIO; - goto out; - } - tunnel_bar = be64_to_cpu(val); - if (enable) { - /* - * Only one device per PHB can use atomics. - * Our policy is first-come, first-served. - */ - if (tunnel_bar) { - if (tunnel_bar != addr) - rc = -EBUSY; - else - rc = 0; /* Setting same address twice is ok */ - goto out; - } - } else { - /* - * The device that owns atomics and wants to release - * them must pass the same address with enable == 0. 
- */ - if (tunnel_bar != addr) { - rc = -EPERM; - goto out; - } - addr = 0x0ULL; - } - rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr); - rc = opal_error_code(rc); -out: - mutex_unlock(&tunnel_mutex); - return rc; -} -EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar); - void pnv_pci_shutdown(void) { struct pci_controller *hose; diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 957f2b47a3c0..42075501663b 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -163,7 +163,6 @@ struct pnv_phb { unsigned int *io_segmap; /* IRQ chip */ - int irq_chip_init; struct irq_chip irq_chip; /* Sorted list of used PE's based @@ -274,7 +273,6 @@ int pnv_pci_cfg_write(struct pci_dn *pdn, int where, int size, u32 val); extern struct iommu_table *pnv_pci_table_alloc(int nid); -extern void pnv_pci_init_ioda_hub(struct device_node *np); extern void pnv_pci_init_ioda2_phb(struct device_node *np); extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np); extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev); @@ -282,7 +280,6 @@ extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option); extern struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn); extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev); -extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq); extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift, __u64 window_size, __u32 levels); extern int pnv_eeh_post_init(void); diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 9e1a25398f98..8f41ef364fc6 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -28,7 +28,7 @@ #include <asm/xive.h> #include <asm/opal.h> #include <asm/runlatch.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/dbell.h> #include <asm/kvm_ppc.h> #include <asm/ppc-opcode.h> @@ -36,6 +36,7 @@ #include <asm/kexec.h> #include <asm/reg.h> #include <asm/powernv.h> +#include <asm/systemcfg.h> #include "powernv.h" @@ -136,7 +137,9 @@ static int pnv_smp_cpu_disable(void) * the generic fixup_irqs. --BenH. 
*/ set_cpu_online(cpu, false); - vdso_data->processorCount--; +#ifdef CONFIG_PPC64_PROC_SYSTEMCFG + systemcfg->processorCount--; +#endif if (cpu == boot_cpuid) boot_cpuid = cpumask_any(cpu_online_mask); if (xive_enabled()) @@ -434,7 +437,7 @@ void __init pnv_smp_init(void) smp_ops = &pnv_smp_ops; #ifdef CONFIG_HOTPLUG_CPU -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_DUMP crash_wake_offline = 1; #endif #endif diff --git a/arch/powerpc/platforms/powernv/ultravisor.c b/arch/powerpc/platforms/powernv/ultravisor.c index 67c8c4b2d8b1..157d9a8134e4 100644 --- a/arch/powerpc/platforms/powernv/ultravisor.c +++ b/arch/powerpc/platforms/powernv/ultravisor.c @@ -32,15 +32,15 @@ int __init early_init_dt_scan_ultravisor(unsigned long node, const char *uname, static struct memcons *uv_memcons; static ssize_t uv_msglog_read(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, char *to, + const struct bin_attribute *bin_attr, char *to, loff_t pos, size_t count) { return memcons_copy(uv_memcons, to, pos, count); } -static struct bin_attribute uv_msglog_attr = { +static struct bin_attribute uv_msglog_attr __ro_after_init = { .attr = {.name = "msglog", .mode = 0400}, - .read = uv_msglog_read + .read_new = uv_msglog_read }; static int __init uv_init(void) diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index b664838008c1..5147df3a18ac 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -1059,7 +1059,7 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, } } else { /* - * Interrupt hanlder or fault window setup failed. Means + * Interrupt handler or fault window setup failed. Means * NX can not generate fault for page fault. So not * opening for user space tx window. */ diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c index 878bc160246e..22d91ac424dd 100644 --- a/arch/powerpc/platforms/ps3/device-init.c +++ b/arch/powerpc/platforms/ps3/device-init.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/reboot.h> #include <linux/rcuwait.h> +#include <linux/string_choices.h> #include <asm/firmware.h> #include <asm/lv1call.h> @@ -178,7 +179,7 @@ fail_malloc: return result; } -static int __ref ps3_setup_uhc_device( +static int __init ps3_setup_uhc_device( const struct ps3_repository_device *repo, enum ps3_match_id match_id, enum ps3_interrupt_type interrupt_type, enum ps3_reg_type reg_type) { @@ -724,7 +725,7 @@ static irqreturn_t ps3_notification_interrupt(int irq, void *data) static int ps3_notification_read_write(struct ps3_notification_device *dev, u64 lpar, int write) { - const char *op = write ? 
"write" : "read"; + const char *op = str_write_read(write); unsigned long flags; int res; @@ -770,49 +771,51 @@ static struct task_struct *probe_task; static int ps3_probe_thread(void *data) { - struct ps3_notification_device dev; + struct { + struct ps3_notification_device dev; + u8 buf[512]; + } *local; + struct ps3_notify_cmd *notify_cmd; + struct ps3_notify_event *notify_event; int res; unsigned int irq; u64 lpar; - void *buf; - struct ps3_notify_cmd *notify_cmd; - struct ps3_notify_event *notify_event; pr_debug(" -> %s:%u: kthread started\n", __func__, __LINE__); - buf = kzalloc(512, GFP_KERNEL); - if (!buf) + local = kzalloc(sizeof(*local), GFP_KERNEL); + if (!local) return -ENOMEM; - lpar = ps3_mm_phys_to_lpar(__pa(buf)); - notify_cmd = buf; - notify_event = buf; + lpar = ps3_mm_phys_to_lpar(__pa(&local->buf)); + notify_cmd = (struct ps3_notify_cmd *)&local->buf; + notify_event = (struct ps3_notify_event *)&local->buf; /* dummy system bus device */ - dev.sbd.bus_id = (u64)data; - dev.sbd.dev_id = PS3_NOTIFICATION_DEV_ID; - dev.sbd.interrupt_id = PS3_NOTIFICATION_INTERRUPT_ID; + local->dev.sbd.bus_id = (u64)data; + local->dev.sbd.dev_id = PS3_NOTIFICATION_DEV_ID; + local->dev.sbd.interrupt_id = PS3_NOTIFICATION_INTERRUPT_ID; - res = lv1_open_device(dev.sbd.bus_id, dev.sbd.dev_id, 0); + res = lv1_open_device(local->dev.sbd.bus_id, local->dev.sbd.dev_id, 0); if (res) { pr_err("%s:%u: lv1_open_device failed %s\n", __func__, __LINE__, ps3_result(res)); goto fail_free; } - res = ps3_sb_event_receive_port_setup(&dev.sbd, PS3_BINDING_CPU_ANY, - &irq); + res = ps3_sb_event_receive_port_setup(&local->dev.sbd, + PS3_BINDING_CPU_ANY, &irq); if (res) { pr_err("%s:%u: ps3_sb_event_receive_port_setup failed %d\n", __func__, __LINE__, res); goto fail_close_device; } - spin_lock_init(&dev.lock); - rcuwait_init(&dev.wait); + spin_lock_init(&local->dev.lock); + rcuwait_init(&local->dev.wait); res = request_irq(irq, ps3_notification_interrupt, 0, - "ps3_notification", &dev); + "ps3_notification", &local->dev); if (res) { pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__, res); @@ -823,7 +826,7 @@ static int ps3_probe_thread(void *data) notify_cmd->operation_code = 0; /* must be zero */ notify_cmd->event_mask = 1UL << notify_region_probe; - res = ps3_notification_read_write(&dev, lpar, 1); + res = ps3_notification_read_write(&local->dev, lpar, 1); if (res) goto fail_free_irq; @@ -834,36 +837,37 @@ static int ps3_probe_thread(void *data) memset(notify_event, 0, sizeof(*notify_event)); - res = ps3_notification_read_write(&dev, lpar, 0); + res = ps3_notification_read_write(&local->dev, lpar, 0); if (res) break; pr_debug("%s:%u: notify event type 0x%llx bus id %llu dev id %llu" " type %llu port %llu\n", __func__, __LINE__, - notify_event->event_type, notify_event->bus_id, - notify_event->dev_id, notify_event->dev_type, - notify_event->dev_port); + notify_event->event_type, notify_event->bus_id, + notify_event->dev_id, notify_event->dev_type, + notify_event->dev_port); if (notify_event->event_type != notify_region_probe || - notify_event->bus_id != dev.sbd.bus_id) { + notify_event->bus_id != local->dev.sbd.bus_id) { pr_warn("%s:%u: bad notify_event: event %llu, dev_id %llu, dev_type %llu\n", __func__, __LINE__, notify_event->event_type, notify_event->dev_id, notify_event->dev_type); continue; } - ps3_find_and_add_device(dev.sbd.bus_id, notify_event->dev_id); + ps3_find_and_add_device(local->dev.sbd.bus_id, + notify_event->dev_id); } while (!kthread_should_stop()); fail_free_irq: - free_irq(irq, 
&dev); + free_irq(irq, &local->dev); fail_sb_event_receive_port_destroy: - ps3_sb_event_receive_port_destroy(&dev.sbd, irq); + ps3_sb_event_receive_port_destroy(&local->dev.sbd, irq); fail_close_device: - lv1_close_device(dev.sbd.bus_id, dev.sbd.dev_id); + lv1_close_device(local->dev.sbd.bus_id, local->dev.sbd.dev_id); fail_free: - kfree(buf); + kfree(local); probe_task = NULL; diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c index 49871427f599..a4ad4b49eef7 100644 --- a/arch/powerpc/platforms/ps3/interrupt.c +++ b/arch/powerpc/platforms/ps3/interrupt.c @@ -378,9 +378,9 @@ int ps3_send_event_locally(unsigned int virq) /** * ps3_sb_event_receive_port_setup - Setup a system bus event receive port. + * @dev: The system bus device instance. * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be * serviced on. - * @dev: The system bus device instance. * @virq: The assigned Linux virq. * * An event irq represents a virtual device interrupt. The interrupt_id @@ -743,8 +743,8 @@ void __init ps3_init_IRQ(void) unsigned cpu; struct irq_domain *host; - host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL); - irq_set_default_host(host); + host = irq_domain_create_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL); + irq_set_default_domain(host); for_each_possible_cpu(cpu) { struct ps3_private *pd = &per_cpu(ps3_private, cpu); diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c index 1abe33fbe529..b8c030eab138 100644 --- a/arch/powerpc/platforms/ps3/repository.c +++ b/arch/powerpc/platforms/ps3/repository.c @@ -940,7 +940,7 @@ int __init ps3_repository_read_vuart_sysmgr_port(unsigned int *port) /** * ps3_repository_read_boot_dat_info - Get address and size of cell_ext_os_area. 
- * address: lpar address of cell_ext_os_area + * @lpar_addr: lpar address of cell_ext_os_area * @size: size of cell_ext_os_area */ diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 5144f11359f7..150c09b58ae8 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -115,10 +115,7 @@ static void __init prealloc(struct ps3_prealloc *p) if (!p->size) return; - p->address = memblock_alloc(p->size, p->align); - if (!p->address) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, p->size, p->align); + p->address = memblock_alloc_or_panic(p->size, p->align); printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size, p->address); diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c index 4a2520ec6d7f..61b37c9400b2 100644 --- a/arch/powerpc/platforms/ps3/spu.c +++ b/arch/powerpc/platforms/ps3/spu.c @@ -190,10 +190,10 @@ static void spu_unmap(struct spu *spu) static int __init setup_areas(struct spu *spu) { struct table {char* name; unsigned long addr; unsigned long size;}; - unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO)); spu_pdata(spu)->shadow = ioremap_prot(spu_pdata(spu)->shadow_addr, - sizeof(struct spe_shadow), shadow_flags); + sizeof(struct spe_shadow), + pgprot_noncached_wc(PAGE_KERNEL_RO)); if (!spu_pdata(spu)->shadow) { pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__); goto fail_ioremap; diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index d6b5f5ecd515..afbaabf182d0 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -333,10 +333,10 @@ int ps3_mmio_region_init(struct ps3_system_bus_device *dev, EXPORT_SYMBOL_GPL(ps3_mmio_region_init); static int ps3_system_bus_match(struct device *_dev, - struct device_driver *_drv) + const struct device_driver *_drv) { int result; - struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv); + const struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv); struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); if (!dev->match_sub_id) @@ -453,10 +453,9 @@ static ssize_t modalias_show(struct device *_dev, struct device_attribute *a, char *buf) { struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); - int len = snprintf(buf, PAGE_SIZE, "ps3:%d:%d\n", dev->match_id, - dev->match_sub_id); - return (len >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : len; + return sysfs_emit(buf, "ps3:%d:%d\n", dev->match_id, + dev->match_sub_id); } static DEVICE_ATTR_RO(modalias); @@ -695,7 +694,7 @@ static const struct dma_map_ops ps3_sb_dma_ops = { .unmap_page = ps3_unmap_page, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; @@ -709,7 +708,7 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = { .unmap_page = ps3_unmap_page, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index afc0f6a61337..fa3c2fff082a 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -23,6 +23,7 @@ config PPC_PSERIES select FORCE_SMP select SWIOTLB select ARCH_SUPPORTS_PER_VMA_LOCK + select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU default y config PARAVIRT @@ -128,6 +129,15 @@ config CMM will be reused for other LPARs. The interface allows firmware to balance memory across many LPARs. +config HTMDUMP + tristate "PowerVM data dumper" + depends on PPC_PSERIES && DEBUG_FS + default m + help + Select this option if you want to enable the kernel debugfs + interface to dump the Hardware Trace Macro (HTM) function data + in the LPAR. + config HV_PERF_CTRS bool "Hypervisor supplied PMU events (24x7 & GPCI)" default y @@ -140,6 +150,20 @@ config HV_PERF_CTRS If unsure, select Y. +config VPA_PMU + tristate "VPA PMU events" + depends on KVM_BOOK3S_64_HV && HV_PERF_CTRS + help + Enable access to the VPA PMU counters via perf. This enables + code that supports measurement for the KVM on PowerVM (KoP) feature. + The PAPR hypervisor has introduced three new counters in the VPA area + of LPAR CPUs for KVM L2 guest observability. Two are for context switches + from host to guest and vice versa, and one counter is for getting + the total time spent inside the KVM guest. This config enables code + that accesses these software counters via perf. + + If unsure, select N.
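As context for the HTMDUMP help text above, a minimal userspace sketch of how the debugfs interface added by this series is meant to be driven. The file paths and the configure/start/dump flow follow htmdump.c later in this diff; the consumer program itself (including the put1() helper) is illustrative only and elides error handling.

/* Illustrative htmdump consumer; assumes CONFIG_HTMDUMP is enabled and
 * debugfs is mounted at /sys/kernel/debug. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void put1(const char *path)
{
	int fd = open(path, O_WRONLY);	/* control files take "0"/"1" */

	if (fd >= 0) {
		write(fd, "1", 1);
		close(fd);
	}
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	put1("/sys/kernel/debug/powerpc/htmdump/htmconfigure"); /* H_HTM_OP_CONFIGURE */
	put1("/sys/kernel/debug/powerpc/htmdump/htmstart");     /* H_HTM_OP_START */
	/* ... let the trace run, then write "0" to the same files to stop ... */
	fd = open("/sys/kernel/debug/powerpc/htmdump/trace", O_RDONLY);
	while (fd >= 0 && (n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	if (fd >= 0)
		close(fd);
	return 0;
}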
+ config IBMVIO depends on PPC_PSERIES bool diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index f936962a2946..57222678bb3f 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -1,10 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG obj-y := lpar.o hvCall.o nvram.o reconfig.o \ of_helpers.o rtas-work-area.o papr-sysparm.o \ - papr-vpd.o \ + papr-rtas-common.o papr-vpd.o papr-indices.o \ + papr-platform-dump.o papr-phy-attest.o \ setup.o iommu.o event_sources.o ras.o \ firmware.o power.o dlpar.o mobility.o rng.o \ pci.o pci_dlpar.o eeh_pseries.o msi.o \ @@ -20,6 +20,7 @@ obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o obj-$(CONFIG_HVCS) += hvcserver.o obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o obj-$(CONFIG_CMM) += cmm.o +obj-$(CONFIG_HTMDUMP) += htmdump.o obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o obj-$(CONFIG_LPARCFG) += lparcfg.o obj-$(CONFIG_IBMVIO) += vio.o diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 47f8eabd1bee..213aa26dc8b3 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -23,6 +23,7 @@ #include <linux/uaccess.h> #include <asm/rtas.h> #include <asm/rtas-work-area.h> +#include <asm/prom.h> static struct workqueue_struct *pseries_hp_wq; @@ -250,11 +251,8 @@ int dlpar_detach_node(struct device_node *dn) struct device_node *child; int rc; - child = of_get_next_child(dn, NULL); - while (child) { + for_each_child_of_node(dn, child) dlpar_detach_node(child); - child = of_get_next_child(dn, child); - } rc = of_detach_node(dn); if (rc) @@ -264,6 +262,20 @@ int dlpar_detach_node(struct device_node *dn) return 0; } +static int dlpar_changeset_attach_cc_nodes(struct of_changeset *ocs, + struct device_node *dn) +{ + int rc; + + rc = of_changeset_attach_node(ocs, dn); + + if (!rc && dn->child) + rc = dlpar_changeset_attach_cc_nodes(ocs, dn->child); + if (!rc && dn->sibling) + rc = dlpar_changeset_attach_cc_nodes(ocs, dn->sibling); + + return rc; +} #define DR_ENTITY_SENSE 9003 #define DR_ENTITY_PRESENT 1 @@ -330,27 +342,206 @@ int dlpar_unisolate_drc(u32 drc_index) return 0; } -int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) +static struct device_node * +get_device_node_with_drc_index(u32 index) +{ + struct device_node *np = NULL; + u32 node_index; + int rc; + + for_each_node_with_property(np, "ibm,my-drc-index") { + rc = of_property_read_u32(np, "ibm,my-drc-index", + &node_index); + if (rc) { + pr_err("%s: %pOF: of_property_read_u32 %s: %d\n", + __func__, np, "ibm,my-drc-index", rc); + of_node_put(np); + return NULL; + } + + if (index == node_index) + break; + } + + return np; +} + +static struct device_node * +get_device_node_with_drc_info(u32 index) +{ + struct device_node *np = NULL; + struct of_drc_info drc; + struct property *info; + const __be32 *value; + u32 node_index; + int i, j, count; + + for_each_node_with_property(np, "ibm,drc-info") { + info = of_find_property(np, "ibm,drc-info", NULL); + if (info == NULL) { + /* XXX can this happen? 
*/ + of_node_put(np); + return NULL; + } + value = of_prop_next_u32(info, NULL, &count); + if (value == NULL) + continue; + value++; + for (i = 0; i < count; i++) { + if (of_read_drc_info_cell(&info, &value, &drc)) + break; + if (index > drc.last_drc_index) + continue; + node_index = drc.drc_index_start; + for (j = 0; j < drc.num_sequential_elems; j++) { + if (index == node_index) + return np; + node_index += drc.sequential_inc; + } + } + } + + return NULL; +} + +static int dlpar_hp_dt_add(u32 index) +{ + struct device_node *np, *nodes; + struct of_changeset ocs; + int rc; + + /* + * Do not add device node(s) if already exists in the + * device tree. + */ + np = get_device_node_with_drc_index(index); + if (np) { + pr_err("%s: Adding device node for index (%d), but " + "already exists in the device tree\n", + __func__, index); + rc = -EINVAL; + goto out; + } + + np = get_device_node_with_drc_info(index); + + if (!np) + return -EIO; + + /* Next, configure the connector. */ + nodes = dlpar_configure_connector(cpu_to_be32(index), np); + if (!nodes) { + rc = -EIO; + goto out; + } + + /* + * Add the new nodes from dlpar_configure_connector() onto + * the device-tree. + */ + of_changeset_init(&ocs); + rc = dlpar_changeset_attach_cc_nodes(&ocs, nodes); + + if (!rc) + rc = of_changeset_apply(&ocs); + else + dlpar_free_cc_nodes(nodes); + + of_changeset_destroy(&ocs); + +out: + of_node_put(np); + return rc; +} + +static int changeset_detach_node_recursive(struct of_changeset *ocs, + struct device_node *node) { + struct device_node *child; int rc; - /* pseries error logs are in BE format, convert to cpu type */ - switch (hp_elog->id_type) { - case PSERIES_HP_ELOG_ID_DRC_COUNT: - hp_elog->_drc_u.drc_count = - be32_to_cpu(hp_elog->_drc_u.drc_count); + for_each_child_of_node(node, child) { + rc = changeset_detach_node_recursive(ocs, child); + if (rc) { + of_node_put(child); + return rc; + } + } + + return of_changeset_detach_node(ocs, node); +} + +static int dlpar_hp_dt_remove(u32 drc_index) +{ + struct device_node *np; + struct of_changeset ocs; + u32 index; + int rc = 0; + + /* + * Prune all nodes with a matching index. 
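+ * Every matching subtree is queued on the single of_changeset and applied + * below by one of_changeset_apply() call, which reverts the queued updates + * if applying any of them fails.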
+ */ + of_changeset_init(&ocs); + + for_each_node_with_property(np, "ibm,my-drc-index") { + rc = of_property_read_u32(np, "ibm,my-drc-index", &index); + if (rc) { + pr_err("%s: %pOF: of_property_read_u32 %s: %d\n", + __func__, np, "ibm,my-drc-index", rc); + of_node_put(np); + goto out; + } + + if (index == drc_index) { + rc = changeset_detach_node_recursive(&ocs, np); + if (rc) { + of_node_put(np); + goto out; + } + } + } + + rc = of_changeset_apply(&ocs); + +out: + of_changeset_destroy(&ocs); + return rc; +} + +static int dlpar_hp_dt(struct pseries_hp_errorlog *phpe) +{ + u32 drc_index; + int rc; + + if (phpe->id_type != PSERIES_HP_ELOG_ID_DRC_INDEX) + return -EINVAL; + + drc_index = be32_to_cpu(phpe->_drc_u.drc_index); + + lock_device_hotplug(); + + switch (phpe->action) { + case PSERIES_HP_ELOG_ACTION_ADD: + rc = dlpar_hp_dt_add(drc_index); break; - case PSERIES_HP_ELOG_ID_DRC_INDEX: - hp_elog->_drc_u.drc_index = - be32_to_cpu(hp_elog->_drc_u.drc_index); + case PSERIES_HP_ELOG_ACTION_REMOVE: + rc = dlpar_hp_dt_remove(drc_index); + break; + default: + pr_err("Invalid action (%d) specified\n", phpe->action); + rc = -EINVAL; break; - case PSERIES_HP_ELOG_ID_DRC_IC: - hp_elog->_drc_u.ic.count = - be32_to_cpu(hp_elog->_drc_u.ic.count); - hp_elog->_drc_u.ic.index = - be32_to_cpu(hp_elog->_drc_u.ic.index); } + unlock_device_hotplug(); + + return rc; +} + +int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) +{ + int rc; + switch (hp_elog->resource) { case PSERIES_HP_ELOG_RESOURCE_MEM: rc = dlpar_memory(hp_elog); @@ -361,6 +552,9 @@ int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) case PSERIES_HP_ELOG_RESOURCE_PMEM: rc = dlpar_hp_pmem(hp_elog); break; + case PSERIES_HP_ELOG_RESOURCE_DT: + rc = dlpar_hp_dt(hp_elog); + break; default: pr_warn_ratelimited("Invalid resource (%d) specified\n", @@ -413,6 +607,8 @@ static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog) hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM; } else if (sysfs_streq(arg, "cpu")) { hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU; + } else if (sysfs_streq(arg, "dt")) { + hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_DT; } else { pr_err("Invalid resource specified.\n"); return -EINVAL; @@ -554,7 +750,7 @@ dlpar_store_out: static ssize_t dlpar_show(const struct class *class, const struct class_attribute *attr, char *buf) { - return sprintf(buf, "%s\n", "memory,cpu"); + return sprintf(buf, "%s\n", "memory,cpu,dt"); } static CLASS_ATTR_RW(dlpar); diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 3f1cdccebc9c..f293588b8c7b 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -191,7 +191,7 @@ static int dtl_enable(struct dtl *dtl) return -EBUSY; /* ensure there are no other conflicting dtl users */ - if (!read_trylock(&dtl_access_lock)) + if (!down_read_trylock(&dtl_access_lock)) return -EBUSY; n_entries = dtl_buf_entries; @@ -199,7 +199,7 @@ static int dtl_enable(struct dtl *dtl) if (!buf) { printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n", __func__, dtl->cpu); - read_unlock(&dtl_access_lock); + up_read(&dtl_access_lock); return -ENOMEM; } @@ -217,7 +217,7 @@ static int dtl_enable(struct dtl *dtl) spin_unlock(&dtl->lock); if (rc) { - read_unlock(&dtl_access_lock); + up_read(&dtl_access_lock); kmem_cache_free(dtl_cache, buf); } @@ -232,7 +232,7 @@ static void dtl_disable(struct dtl *dtl) dtl->buf = NULL; dtl->buf_entries = 0; spin_unlock(&dtl->lock); - read_unlock(&dtl_access_lock); + 
up_read(&dtl_access_lock); } /* file interface */ @@ -325,7 +325,6 @@ static const struct file_operations dtl_fops = { .open = dtl_file_open, .release = dtl_file_release, .read = dtl_file_read, - .llseek = no_llseek, }; static struct dentry *dtl_dir; diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index b1ae0c0d1187..b12ef382fec7 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -580,8 +580,10 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay) switch(rets[0]) { case 0: - result = EEH_STATE_MMIO_ACTIVE | - EEH_STATE_DMA_ACTIVE; + result = EEH_STATE_MMIO_ACTIVE | + EEH_STATE_DMA_ACTIVE | + EEH_STATE_MMIO_ENABLED | + EEH_STATE_DMA_ENABLED; break; case 1: result = EEH_STATE_RESET_ACTIVE | @@ -784,6 +786,43 @@ static int pseries_notify_resume(struct eeh_dev *edev) } #endif +/** + * pseries_eeh_err_inject - Inject specified error to the indicated PE + * @pe: the indicated PE + * @type: error type + * @func: specific error type + * @addr: address + * @mask: address mask + * The routine is called to inject specified error, which is + * determined by @type and @func, to the indicated PE + */ +static int pseries_eeh_err_inject(struct eeh_pe *pe, int type, int func, + unsigned long addr, unsigned long mask) +{ + struct eeh_dev *pdev; + + /* Check on PCI error type */ + if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64) + return -EINVAL; + + switch (func) { + case EEH_ERR_FUNC_LD_MEM_ADDR: + case EEH_ERR_FUNC_LD_MEM_DATA: + case EEH_ERR_FUNC_ST_MEM_ADDR: + case EEH_ERR_FUNC_ST_MEM_DATA: + /* injects a MMIO error for all pdev's belonging to PE */ + pci_lock_rescan_remove(); + list_for_each_entry(pdev, &pe->edevs, entry) + eeh_pe_inject_mmio_error(pdev->pdev); + pci_unlock_rescan_remove(); + break; + default: + return -ERANGE; + } + + return 0; +} + static struct eeh_ops pseries_eeh_ops = { .name = "pseries", .probe = pseries_eeh_probe, @@ -792,7 +831,7 @@ static struct eeh_ops pseries_eeh_ops = { .reset = pseries_eeh_reset, .get_log = pseries_eeh_get_log, .configure_bridge = pseries_eeh_configure_bridge, - .err_inject = NULL, + .err_inject = pseries_eeh_err_inject, .read_config = pseries_eeh_read_config, .write_config = pseries_eeh_write_config, .next_error = NULL, diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index e62835a12d73..bc6926dbf148 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -33,6 +33,7 @@ #include <asm/xive.h> #include <asm/plpar_wrappers.h> #include <asm/topology.h> +#include <asm/systemcfg.h> #include "pseries.h" @@ -83,7 +84,9 @@ static int pseries_cpu_disable(void) int cpu = smp_processor_id(); set_cpu_online(cpu, false); - vdso_data->processorCount--; +#ifdef CONFIG_PPC64_PROC_SYSTEMCFG + systemcfg->processorCount--; +#endif /*fix boot_cpuid here*/ if (cpu == boot_cpuid) @@ -757,7 +760,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) u32 drc_index; int rc; - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); lock_device_hotplug(); diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 3fe3ddb30c04..38dc4f7c9296 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -817,16 +817,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) case 
PSERIES_HP_ELOG_ACTION_ADD: switch (hp_elog->id_type) { case PSERIES_HP_ELOG_ID_DRC_COUNT: - count = hp_elog->_drc_u.drc_count; + count = be32_to_cpu(hp_elog->_drc_u.drc_count); rc = dlpar_memory_add_by_count(count); break; case PSERIES_HP_ELOG_ID_DRC_INDEX: - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); rc = dlpar_memory_add_by_index(drc_index); break; case PSERIES_HP_ELOG_ID_DRC_IC: - count = hp_elog->_drc_u.ic.count; - drc_index = hp_elog->_drc_u.ic.index; + count = be32_to_cpu(hp_elog->_drc_u.ic.count); + drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index); rc = dlpar_memory_add_by_ic(count, drc_index); break; default: @@ -838,16 +838,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) case PSERIES_HP_ELOG_ACTION_REMOVE: switch (hp_elog->id_type) { case PSERIES_HP_ELOG_ID_DRC_COUNT: - count = hp_elog->_drc_u.drc_count; + count = be32_to_cpu(hp_elog->_drc_u.drc_count); rc = dlpar_memory_remove_by_count(count); break; case PSERIES_HP_ELOG_ID_DRC_INDEX: - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); rc = dlpar_memory_remove_by_index(drc_index); break; case PSERIES_HP_ELOG_ID_DRC_IC: - count = hp_elog->_drc_u.ic.count; - drc_index = hp_elog->_drc_u.ic.index; + count = be32_to_cpu(hp_elog->_drc_u.ic.count); + drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index); rc = dlpar_memory_remove_by_ic(count, drc_index); break; default: diff --git a/arch/powerpc/platforms/pseries/htmdump.c b/arch/powerpc/platforms/pseries/htmdump.c new file mode 100644 index 000000000000..742ec52c9d4d --- /dev/null +++ b/arch/powerpc/platforms/pseries/htmdump.c @@ -0,0 +1,490 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) IBM Corporation, 2024 + */ + +#define pr_fmt(fmt) "htmdump: " fmt + +#include <linux/debugfs.h> +#include <linux/module.h> +#include <asm/io.h> +#include <asm/machdep.h> +#include <asm/plpar_wrappers.h> +#include <asm/kvm_guest.h> + +static void *htm_buf; +static void *htm_status_buf; +static void *htm_info_buf; +static void *htm_caps_buf; +static u32 nodeindex; +static u32 nodalchipindex; +static u32 coreindexonchip; +static u32 htmtype; +static u32 htmconfigure; +static u32 htmstart; +static u32 htmsetup; +static u64 htmflags; + +static struct dentry *htmdump_debugfs_dir; +#define HTM_ENABLE 1 +#define HTM_DISABLE 0 +#define HTM_NOWRAP 1 +#define HTM_WRAP 0 + +/* + * Check the return code for H_HTM hcall. + * Return non-zero value (1) if either H_PARTIAL or H_SUCCESS + * is returned. For other return codes: + * Return zero if H_NOT_AVAILABLE. + * Return -EBUSY if hcall return busy. + * Return -EINVAL if any parameter or operation is not valid. + * Return -EPERM if HTM Virtualization Engine Technology code + * is not applied. + * Return -EIO if the HTM state is not valid. + */ +static ssize_t htm_return_check(long rc) +{ + switch (rc) { + case H_SUCCESS: + /* H_PARTIAL for the case where all available data can't be + * returned due to buffer size constraint. + */ + case H_PARTIAL: + break; + /* H_NOT_AVAILABLE indicates reading from an offset outside the range, + * i.e. past end of file. 
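+ * (Callers propagate that 0, so a debugfs read sees a clean EOF.)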
+ */ + case H_NOT_AVAILABLE: + return 0; + case H_BUSY: + case H_LONG_BUSY_ORDER_1_MSEC: + case H_LONG_BUSY_ORDER_10_MSEC: + case H_LONG_BUSY_ORDER_100_MSEC: + case H_LONG_BUSY_ORDER_1_SEC: + case H_LONG_BUSY_ORDER_10_SEC: + case H_LONG_BUSY_ORDER_100_SEC: + return -EBUSY; + case H_PARAMETER: + case H_P2: + case H_P3: + case H_P4: + case H_P5: + case H_P6: + return -EINVAL; + case H_STATE: + return -EIO; + case H_AUTHORITY: + return -EPERM; + } + + /* + * Return 1 for H_SUCCESS/H_PARTIAL + */ + return 1; +} + +static ssize_t htmdump_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_buf = filp->private_data; + unsigned long page, read_size, available; + loff_t offset; + long rc, ret; + + page = ALIGN_DOWN(*ppos, PAGE_SIZE); + offset = (*ppos) % PAGE_SIZE; + + /* + * Invoke H_HTM call with: + * - operation as htm dump (H_HTM_OP_DUMP_DATA) + * - last three values are address, size and offset + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_DUMP_DATA, virt_to_phys(htm_buf), + PAGE_SIZE, page); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_DUMP_DATA, returning %ld\n", ret); + return ret; + } + + available = PAGE_SIZE; + read_size = min(count, available); + *ppos += read_size; + return simple_read_from_buffer(ubuf, count, &offset, htm_buf, available); +} + +static const struct file_operations htmdump_fops = { + .llseek = NULL, + .read = htmdump_read, + .open = simple_open, +}; + +static int htmconfigure_set(void *data, u64 val) +{ + long rc, ret; + unsigned long param1 = -1, param2 = -1; + + /* + * value as 1 : configure HTM. + * value as 0 : deconfigure HTM. Return -EINVAL for + * other values. + */ + if (val == HTM_ENABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm configure (H_HTM_OP_CONFIGURE) + * - If htmflags is set, param1 and param2 will be -1 + * which is an indicator to use default htm mode reg mask + * and htm mode reg value. + * - last three values are unused, hence set to zero + */ + if (!htmflags) { + param1 = 0; + param2 = 0; + } + + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_CONFIGURE, param1, param2, 0); + } else if (val == HTM_DISABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm deconfigure (H_HTM_OP_DECONFIGURE) + * - last three values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_DECONFIGURE, 0, 0, 0); + } else + return -EINVAL; + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed, returning %ld\n", ret); + return ret; + } + + /* Set htmconfigure if operation succeeds */ + htmconfigure = val; + + return 0; +} + +static int htmconfigure_get(void *data, u64 *val) +{ + *val = htmconfigure; + return 0; +} + +static int htmstart_set(void *data, u64 val) +{ + long rc, ret; + + /* + * value as 1: start HTM + * value as 0: stop HTM + * Return -EINVAL for other values. 
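+ * Example (assumes debugfs is mounted at /sys/kernel/debug): + * echo 1 > /sys/kernel/debug/powerpc/htmdump/htmstart + * echo 0 > /sys/kernel/debug/powerpc/htmdump/htmstart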
+ */ + if (val == HTM_ENABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm start (H_HTM_OP_START) + * - last three values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_START, 0, 0, 0); + + } else if (val == HTM_DISABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm stop (H_HTM_OP_STOP) + * - last three values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_STOP, 0, 0, 0); + } else + return -EINVAL; + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed, returning %ld\n", ret); + return ret; + } + + /* Set htmstart if H_HTM_OP_START/H_HTM_OP_STOP operation succeeds */ + htmstart = val; + + return 0; +} + +static int htmstart_get(void *data, u64 *val) +{ + *val = htmstart; + return 0; +} + +static ssize_t htmstatus_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_status_buf = filp->private_data; + long rc, ret; + u64 *num_entries; + u64 to_copy; + int htmstatus_flag; + + /* + * Invoke H_HTM call with: + * - operation as htm status (H_HTM_OP_STATUS) + * - last three values as addr, size and offset + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_STATUS, virt_to_phys(htm_status_buf), + PAGE_SIZE, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_STATUS, returning %ld\n", ret); + return ret; + } + + /* + * HTM status buffer, start of buffer + 0x10 gives the + * number of HTM entries in the buffer. Each nest htm status + * entry is 0x6 bytes while each core htm status entry is + * 0x8 bytes. + * So total count to copy is: + * 32 bytes (for first 7 fields) + (number of HTM entries * entry size) + */ + num_entries = htm_status_buf + 0x10; + if (htmtype == 0x2) + htmstatus_flag = 0x8; + else + htmstatus_flag = 0x6; + to_copy = 32 + (be64_to_cpu(*num_entries) * htmstatus_flag); + return simple_read_from_buffer(ubuf, count, ppos, htm_status_buf, to_copy); +} + +static const struct file_operations htmstatus_fops = { + .llseek = NULL, + .read = htmstatus_read, + .open = simple_open, +}; + +static ssize_t htminfo_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_info_buf = filp->private_data; + long rc, ret; + u64 *num_entries; + u64 to_copy; + + /* + * Invoke H_HTM call with: + * - operation as dump system processor configuration + * (H_HTM_OP_DUMP_SYSPROC_CONF) + * - last three values as addr, size and offset + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_DUMP_SYSPROC_CONF, virt_to_phys(htm_info_buf), + PAGE_SIZE, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_DUMP_SYSPROC_CONF, returning %ld\n", ret); + return ret; + } + + /* + * HTM status buffer, start of buffer + 0x10 gives the + * number of HTM entries in the buffer. Each processor entry + * is 16 bytes.
+ * + * So total count to copy is: + * 32 bytes (for first 5 fields) + (number of HTM entries * entry size) + */ + num_entries = htm_info_buf + 0x10; + to_copy = 32 + (be64_to_cpu(*num_entries) * 16); + return simple_read_from_buffer(ubuf, count, ppos, htm_info_buf, to_copy); +} + +static ssize_t htmcaps_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_caps_buf = filp->private_data; + long rc, ret; + + /* + * Invoke H_HTM call with: + * - operation as htm capabilities (H_HTM_OP_CAPABILITIES) + * - last three values as addr, size (0x80 for the Capabilities Output Buffer) + * and zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_CAPABILITIES, virt_to_phys(htm_caps_buf), + 0x80, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_CAPABILITIES, returning %ld\n", ret); + return ret; + } + + return simple_read_from_buffer(ubuf, count, ppos, htm_caps_buf, 0x80); +} + +static const struct file_operations htminfo_fops = { + .llseek = NULL, + .read = htminfo_read, + .open = simple_open, +}; + +static const struct file_operations htmcaps_fops = { + .llseek = NULL, + .read = htmcaps_read, + .open = simple_open, +}; + +static int htmsetup_set(void *data, u64 val) +{ + long rc, ret; + + /* + * Input value: HTM buffer size as a power of 2 + * example: hex value 0x21 (decimal: 33) is for + * 8GB (2^33 bytes) + * Invoke H_HTM call with: + * - operation as htm setup (H_HTM_OP_SETUP) + * - parameter 1 set to input value. + * - last two values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_SETUP, val, 0, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_SETUP, returning %ld\n", ret); + return ret; + } + + /* Set htmsetup if H_HTM_OP_SETUP operation succeeds */ + htmsetup = val; + + return 0; +} + +static int htmsetup_get(void *data, u64 *val) +{ + *val = htmsetup; + return 0; +} + +static int htmflags_set(void *data, u64 val) +{ + /* + * Input value: + * Currently supported flag value is to enable/disable + * HTM buffer wrap. Wrap is used along with "configure" + * to prevent HTM buffer from wrapping.
+ * Writing 1 will set noWrap while configuring HTM + */ + if (val == HTM_NOWRAP) + htmflags = H_HTM_FLAGS_NOWRAP; + else if (val == HTM_WRAP) + htmflags = 0; + else + return -EINVAL; + + return 0; +} + +static int htmflags_get(void *data, u64 *val) +{ + *val = htmflags; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(htmconfigure_fops, htmconfigure_get, htmconfigure_set, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(htmstart_fops, htmstart_get, htmstart_set, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(htmsetup_fops, htmsetup_get, htmsetup_set, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(htmflags_fops, htmflags_get, htmflags_set, "%llu\n"); + +static int htmdump_init_debugfs(void) +{ + htm_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_buf) { + pr_err("Failed to allocate htmdump buf\n"); + return -ENOMEM; + } + + htmdump_debugfs_dir = debugfs_create_dir("htmdump", + arch_debugfs_dir); + + debugfs_create_u32("nodeindex", 0600, + htmdump_debugfs_dir, &nodeindex); + debugfs_create_u32("nodalchipindex", 0600, + htmdump_debugfs_dir, &nodalchipindex); + debugfs_create_u32("coreindexonchip", 0600, + htmdump_debugfs_dir, &coreindexonchip); + debugfs_create_u32("htmtype", 0600, + htmdump_debugfs_dir, &htmtype); + debugfs_create_file("trace", 0400, htmdump_debugfs_dir, htm_buf, &htmdump_fops); + + /* + * Debugfs interface files to control HTM operations: + */ + debugfs_create_file("htmconfigure", 0600, htmdump_debugfs_dir, NULL, &htmconfigure_fops); + debugfs_create_file("htmstart", 0600, htmdump_debugfs_dir, NULL, &htmstart_fops); + debugfs_create_file("htmsetup", 0600, htmdump_debugfs_dir, NULL, &htmsetup_fops); + debugfs_create_file("htmflags", 0600, htmdump_debugfs_dir, NULL, &htmflags_fops); + + /* Debugfs interface file to present status of HTM */ + htm_status_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_status_buf) { + pr_err("Failed to allocate htmstatus buf\n"); + return -ENOMEM; + } + + /* Debugfs interface file to present System Processor Configuration */ + htm_info_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_info_buf) { + pr_err("Failed to allocate htm info buf\n"); + return -ENOMEM; + } + + /* Debugfs interface file to present HTM capabilities */ + htm_caps_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_caps_buf) { + pr_err("Failed to allocate htm caps buf\n"); + return -ENOMEM; + } + + debugfs_create_file("htmstatus", 0400, htmdump_debugfs_dir, htm_status_buf, &htmstatus_fops); + debugfs_create_file("htminfo", 0400, htmdump_debugfs_dir, htm_info_buf, &htminfo_fops); + debugfs_create_file("htmcaps", 0400, htmdump_debugfs_dir, htm_caps_buf, &htmcaps_fops); + + return 0; +} + +static int __init htmdump_init(void) +{ + /* Disable on kvm guest */ + if (is_kvm_guest()) { + pr_info("htmdump not supported inside KVM guest\n"); + return -EOPNOTSUPP; + } + + if (htmdump_init_debugfs()) + return -ENOMEM; + + return 0; +} + +static void __exit htmdump_exit(void) +{ + debugfs_remove_recursive(htmdump_debugfs_dir); + kfree(htm_buf); +} + +module_init(htmdump_init); +module_exit(htmdump_exit); +MODULE_DESCRIPTION("PHYP Hardware Trace Macro (HTM) data dumper"); +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index b401282727a4..3436b0af795e 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -339,7 +339,7 @@ static struct attribute *ibmbus_bus_attrs[] = { }; ATTRIBUTE_GROUPS(ibmbus_bus); -static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) +static int 
ibmebus_bus_bus_match(struct device *dev, const struct device_driver *drv) { const struct of_device_id *matches = drv->of_match_table; diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index e8c4129697b1..eec333dd2e59 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -21,6 +21,7 @@ #include <linux/dma-mapping.h> #include <linux/crash_dump.h> #include <linux/memory.h> +#include <linux/vmalloc.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/iommu.h> @@ -51,7 +52,8 @@ enum { enum { DDW_EXT_SIZE = 0, DDW_EXT_RESET_DMA_WIN = 1, - DDW_EXT_QUERY_OUT_SIZE = 2 + DDW_EXT_QUERY_OUT_SIZE = 2, + DDW_EXT_LIMITED_ADDR_MODE = 3 }; static struct iommu_table *iommu_pseries_alloc_table(int node) @@ -67,6 +69,10 @@ static struct iommu_table *iommu_pseries_alloc_table(int node) return tbl; } +#ifdef CONFIG_IOMMU_API +static struct iommu_table_group_ops spapr_tce_table_group_ops; +#endif + static struct iommu_table_group *iommu_pseries_alloc_group(int node) { struct iommu_table_group *table_group; @@ -102,7 +108,7 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group, #endif /* Default DMA window table is at index 0, while DDW at 1. SR-IOV - * adapters only have table on index 1. + * adapters only have table on index 0(if not direct mapped). */ if (table_group->tables[0]) iommu_tce_table_put(table_group->tables[0]); @@ -143,7 +149,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, } -static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) +static void tce_clear_pSeries(struct iommu_table *tbl, long index, long npages) { __be64 *tcep; @@ -162,6 +168,39 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) return be64_to_cpu(*tcep); } +#ifdef CONFIG_IOMMU_API +static long pseries_tce_iommu_userspace_view_alloc(struct iommu_table *tbl) +{ + unsigned long cb = ALIGN(sizeof(tbl->it_userspace[0]) * tbl->it_size, PAGE_SIZE); + unsigned long *uas; + + if (tbl->it_indirect_levels) /* Impossible */ + return -EPERM; + + WARN_ON(tbl->it_userspace); + + uas = vzalloc(cb); + if (!uas) + return -ENOMEM; + + tbl->it_userspace = (__be64 *) uas; + + return 0; +} +#endif + +static void tce_iommu_userspace_view_free(struct iommu_table *tbl) +{ + vfree(tbl->it_userspace); + tbl->it_userspace = NULL; +} + +static void tce_free_pSeries(struct iommu_table *tbl) +{ + if (tbl->it_userspace) + tce_iommu_userspace_view_free(tbl); +} + static void tce_free_pSeriesLP(unsigned long liobn, long, long, long); static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long); @@ -576,7 +615,7 @@ struct iommu_table_ops iommu_table_lpar_multi_ops; struct iommu_table_ops iommu_table_pseries_ops = { .set = tce_build_pSeries, - .clear = tce_free_pSeries, + .clear = tce_clear_pSeries, .get = tce_get_pseries }; @@ -685,17 +724,47 @@ static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned return rc; } + +static __be64 *tce_useraddr_pSeriesLP(struct iommu_table *tbl, long index, + bool __always_unused alloc) +{ + return tbl->it_userspace ? 
&tbl->it_userspace[index - tbl->it_offset] : NULL; +} #endif struct iommu_table_ops iommu_table_lpar_multi_ops = { .set = tce_buildmulti_pSeriesLP, #ifdef CONFIG_IOMMU_API .xchg_no_kill = tce_exchange_pseries, + .useraddrptr = tce_useraddr_pSeriesLP, #endif .clear = tce_freemulti_pSeriesLP, - .get = tce_get_pSeriesLP + .get = tce_get_pSeriesLP, + .free = tce_free_pSeries }; +#ifdef CONFIG_IOMMU_API +/* + * When the DMA window properties might have been removed, + * the parent node has the table_group setup on it. + */ +static struct device_node *pci_dma_find_parent_node(struct pci_dev *dev, + struct iommu_table_group *table_group) +{ + struct device_node *dn = pci_device_to_OF_node(dev); + struct pci_dn *rpdn; + + for (; dn && PCI_DN(dn); dn = dn->parent) { + rpdn = PCI_DN(dn); + + if (table_group == rpdn->table_group) + return dn; + } + + return NULL; +} +#endif + /* * Find nearest ibm,dma-window (default DMA window) or direct DMA window or * dynamic 64bit DMA window, walking up the device tree. @@ -786,8 +855,16 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) * parent bus. During reboot, there will be ibm,dma-window property to * define DMA window. For kdump, there will at least be default window or DDW * or both. + * There is an exception to the above. In case the PE goes into frozen + * state, firmware may not provide ibm,dma-window property at the time + * of LPAR boot up. */ + if (!pdn) { + pr_debug(" no ibm,dma-window property !\n"); + return; + } + ppci = PCI_DN(pdn); pr_debug(" parent is %pOF, iommu_table: 0x%p\n", @@ -804,13 +881,6 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) be32_to_cpu(prop.tce_shift), NULL, &iommu_table_lpar_multi_ops); - /* Only for normal boot with default window. Doesn't matter even - * if we set these with DDW which is 64bit during kdump, since - * these will not be used during kdump. 
- */ - ppci->table_group->tce32_start = be64_to_cpu(prop.dma_base); - ppci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift); - if (!iommu_init_table(tbl, ppci->phb->node, 0, 0)) panic("Failed to initialize iommu table"); @@ -909,7 +979,7 @@ static void __remove_dma_window(struct device_node *np, u32 *ddw_avail, u64 liob } static void remove_dma_window(struct device_node *np, u32 *ddw_avail, - struct property *win) + struct property *win, bool cleanup) { struct dynamic_dma_window_prop *dwp; u64 liobn; @@ -917,11 +987,44 @@ static void remove_dma_window(struct device_node *np, u32 *ddw_avail, dwp = win->value; liobn = (u64)be32_to_cpu(dwp->liobn); - clean_dma_window(np, dwp); + if (cleanup) + clean_dma_window(np, dwp); __remove_dma_window(np, ddw_avail, liobn); } -static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_name) +static void copy_property(struct device_node *pdn, const char *from, const char *to) +{ + struct property *src, *dst; + + src = of_find_property(pdn, from, NULL); + if (!src) + return; + + dst = kzalloc(sizeof(*dst), GFP_KERNEL); + if (!dst) + return; + + dst->name = kstrdup(to, GFP_KERNEL); + dst->value = kmemdup(src->value, src->length, GFP_KERNEL); + dst->length = src->length; + if (!dst->name || !dst->value) + return; + + if (of_add_property(pdn, dst)) { + pr_err("Unable to add DMA window property for %pOF", pdn); + goto free_prop; + } + + return; + +free_prop: + kfree(dst->name); + kfree(dst->value); + kfree(dst); +} + +static int remove_dma_window_named(struct device_node *np, bool remove_prop, const char *win_name, + bool cleanup) { struct property *win; u32 ddw_avail[DDW_APPLICABLE_SIZE]; @@ -936,13 +1039,20 @@ static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_ if (ret) return 0; - if (win->length >= sizeof(struct dynamic_dma_window_prop)) - remove_dma_window(np, ddw_avail, win); + remove_dma_window(np, ddw_avail, win, cleanup); if (!remove_prop) return 0; + /* Default window property if removed is lost as reset-pe doesn't restore it. + * Though FDT has a copy of it, the DLPAR hotplugged devices will not have a + * node on FDT until next reboot. So, back it up. 
+ */ + if ((strcmp(win_name, "ibm,dma-window") == 0) && + !of_find_property(np, "ibm,dma-window-saved", NULL)) + copy_property(np, win_name, "ibm,dma-window-saved"); + ret = of_remove_property(np, win); if (ret) pr_warn("%pOF: failed to remove DMA window property: %d\n", @@ -1000,7 +1110,7 @@ static void find_existing_ddw_windows_named(const char *name) for_each_node_with_property(pdn, name) { dma64 = of_get_property(pdn, name, &len); if (!dma64 || len < sizeof(*dma64)) { - remove_ddw(pdn, true, name); + remove_dma_window_named(pdn, true, name, true); continue; } @@ -1175,17 +1285,13 @@ static LIST_HEAD(failed_ddw_pdn_list); static phys_addr_t ddw_memory_hotplug_max(void) { - resource_size_t max_addr = memory_hotplug_max(); - struct device_node *memory; - - for_each_node_by_type(memory, "memory") { - struct resource res; - - if (of_address_to_resource(memory, 0, &res)) - continue; + resource_size_t max_addr; - max_addr = max_t(resource_size_t, max_addr, res.end + 1); - } +#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) + max_addr = hot_add_drconf_memory_max(); +#else + max_addr = memblock_end_of_DRAM(); +#endif return max_addr; } @@ -1222,6 +1328,54 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn) ret); } +/* + * Platforms starting with LoPAR level 2.13 support placing the PHB in + * limited address mode. In this mode, the DMA address returned by DDW is + * above 4GB but narrower than 64 bits. This benefits IO adapters that + * don't support 64-bit DMA addresses. + */ +static int limited_dma_window(struct pci_dev *dev, struct device_node *par_dn) +{ + int ret; + u32 cfg_addr, reset_dma_win, las_supported; + u64 buid; + struct device_node *dn; + struct pci_dn *pdn; + + ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win); + if (ret) + goto out; + + ret = ddw_read_ext(par_dn, DDW_EXT_LIMITED_ADDR_MODE, &las_supported); + + /* Limited Address Space extension is available on the platform, but DDW + * in limited addressing mode is not supported + */ + if (!ret && !las_supported) + ret = -EPROTO; + + if (ret) { + dev_info(&dev->dev, "Limited Address Space for DDW not Supported, err: %d", ret); + goto out; + } + + dn = pci_device_to_OF_node(dev); + pdn = PCI_DN(dn); + buid = pdn->phb->buid; + cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8); + + ret = rtas_call(reset_dma_win, 4, 1, NULL, cfg_addr, BUID_HI(buid), + BUID_LO(buid), 1); + if (ret) + dev_info(&dev->dev, + "ibm,reset-pe-dma-windows(%x) for Limited Addr Support: %x %x %x returned %d ", + reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid), + ret); + +out: + return ret; +} + /* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */ static int iommu_get_page_shift(u32 query_page_size) { @@ -1289,14 +1443,14 @@ static struct property *ddw_property_create(const char *propname, u32 liobn, u64 * * returns true if can map all pages (direct mapping), false otherwise..
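+ * @dma_mask is the DMA mask the device driver requested; a mask narrower + * than DMA_BIT_MASK(64) makes this routine try limited address mode first.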
*/ -static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) +static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn, u64 dma_mask) { int len = 0, ret; int max_ram_len = order_base_2(ddw_memory_hotplug_max()); struct ddw_query_response query; struct ddw_create_response create; int page_shift; - u64 win_addr; + u64 win_addr, dynamic_offset = 0; const char *win_name; struct device_node *dn; u32 ddw_avail[DDW_APPLICABLE_SIZE]; @@ -1304,9 +1458,13 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) struct property *win64; struct failed_ddw_pdn *fpdn; bool default_win_removed = false, direct_mapping = false; + bool dynamic_mapping = false; bool pmem_present; struct pci_dn *pci = PCI_DN(pdn); struct property *default_win = NULL; + bool limited_addr_req = false, limited_addr_enabled = false; + int dev_max_ddw; + int ddw_sz; dn = of_find_node_by_type(NULL, "ibm,pmemory"); pmem_present = dn != NULL; @@ -1333,7 +1491,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) * the ibm,ddw-applicable property holds the tokens for: * ibm,query-pe-dma-window * ibm,create-pe-dma-window - * ibm,remove-pe-dma-window * for the given node in that order. * the property is actually in the parent, not the PE */ @@ -1353,6 +1510,20 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (ret != 0) goto out_failed; + /* DMA Limited Addressing required? This is when the driver has + * requested to create DDW but supports mask which is less than 64-bits + */ + limited_addr_req = (dma_mask != DMA_BIT_MASK(64)); + + /* place the PHB in Limited Addressing mode */ + if (limited_addr_req) { + if (limited_dma_window(dev, pdn)) + goto out_failed; + + /* PHB is in Limited address mode */ + limited_addr_enabled = true; + } + /* * If there is no window available, remove the default DMA window, * if it's present. This will make all the resources available to the @@ -1377,7 +1548,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (reset_win_ext) goto out_failed; - remove_dma_window(pdn, ddw_avail, default_win); + remove_dma_window(pdn, ddw_avail, default_win, true); default_win_removed = true; /* Query again, to check if the window is available */ @@ -1399,6 +1570,14 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) goto out_failed; } + /* Maximum DMA window size that the device can address (in log2) */ + dev_max_ddw = fls64(dma_mask); + + /* If the device DMA mask is less than 64-bits, make sure the DMA window + * size is not bigger than what the device can access + */ + ddw_sz = min(order_base_2(query.largest_available_block << page_shift), + dev_max_ddw); /* * The "ibm,pmemory" can appear anywhere in the address space. 
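For orientation, the dma_mask consulted above originates from the device driver via the generic DMA API. A minimal sketch of that driver side, assuming a hypothetical device that can only address 43 bits; example_probe() and the 43-bit mask are illustrative, not part of this series.

/* Hypothetical PCI driver fragment: a device limited to 43-bit DMA.
 * On pseries, this mask reaches enable_ddw(), which then attempts a
 * DDW in limited address mode rather than leaving the device on the
 * small default 32-bit window. */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));

	if (rc)	/* fall back to the 32-bit default window */
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return rc;
}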
@@ -1408,30 +1587,56 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) */ len = max_ram_len; if (pmem_present) { - if (query.largest_available_block >= - (1ULL << (MAX_PHYSMEM_BITS - page_shift))) + if (ddw_sz >= MAX_PHYSMEM_BITS) len = MAX_PHYSMEM_BITS; else dev_info(&dev->dev, "Skipping ibm,pmemory"); } /* check if the available block * number of ptes will map everything */ - if (query.largest_available_block < (1ULL << (len - page_shift))) { + if (ddw_sz < len) { dev_dbg(&dev->dev, "can't map partition max 0x%llx with %llu %llu-sized pages\n", 1ULL << len, query.largest_available_block, 1ULL << page_shift); - len = order_base_2(query.largest_available_block << page_shift); - win_name = DMA64_PROPNAME; + len = ddw_sz; + dynamic_mapping = true; } else { direct_mapping = !default_win_removed || (len == MAX_PHYSMEM_BITS) || (!pmem_present && (len == max_ram_len)); - win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME; + + /* DDW is big enough to direct map RAM. If there is vPMEM, check + * if enough space is left in DDW where we can dynamically + * allocate TCEs for vPMEM. For now, this Hybrid sharing of DDW + * is only for SR-IOV devices. + */ + if (default_win_removed && pmem_present && !direct_mapping) { + /* DDW is big enough to be split */ + if ((1ULL << ddw_sz) >= + MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) { + + direct_mapping = true; + + /* offset of the Dynamic part of DDW */ + dynamic_offset = 1ULL << max_ram_len; + } + + /* DDW will at least have dynamic allocation */ + dynamic_mapping = true; + + /* create max size DDW possible */ + len = ddw_sz; + } } + /* Even if the DDW is split into both direct mapped RAM and dynamically + * mapped vPMEM, the DDW property in OF will be marked as Direct. + */ + win_name = direct_mapping ? 
DIRECT64_PROPNAME : DMA64_PROPNAME; + ret = create_ddw(dev, ddw_avail, &create, page_shift, len); if (ret != 0) goto out_failed; @@ -1459,11 +1664,11 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (!window) goto out_del_prop; - if (direct_mapping) { - window->direct = true; + window->direct = direct_mapping; + if (direct_mapping) { /* DDW maps the whole partition, so enable direct DMA mapping */ - ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT, + ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT, win64->value, tce_setrange_multi_pSeriesLP_walk); if (ret) { dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n", @@ -1473,12 +1678,18 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) clean_dma_window(pdn, win64->value); goto out_del_list; } - } else { + if (default_win_removed) { + iommu_tce_table_put(pci->table_group->tables[0]); + pci->table_group->tables[0] = NULL; + set_iommu_table_base(&dev->dev, NULL); + } + } + + if (dynamic_mapping) { struct iommu_table *newtbl; int i; unsigned long start = 0, end = 0; - - window->direct = false; + u64 dynamic_addr, dynamic_len; for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) { const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM; @@ -1498,20 +1709,27 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) goto out_del_list; } - iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, win_addr, - 1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops); - iommu_init_table(newtbl, pci->phb->node, start, end); + /* If the DDW is split between directly mapped RAM and Dynamic + * mapped for TCES, offset into the DDW where the dynamic part + * begins. + */ + dynamic_addr = win_addr + dynamic_offset; + dynamic_len = (1UL << len) - dynamic_offset; + iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, + dynamic_addr, dynamic_len, page_shift, NULL, + &iommu_table_lpar_multi_ops); + iommu_init_table(newtbl, pci->phb->node, + start >> page_shift, end >> page_shift); - pci->table_group->tables[1] = newtbl; + pci->table_group->tables[default_win_removed ? 0 : 1] = newtbl; set_iommu_table_base(&dev->dev, newtbl); } if (default_win_removed) { - iommu_tce_table_put(pci->table_group->tables[0]); - pci->table_group->tables[0] = NULL; - /* default_win is valid here because default_win_removed == true */ + if (!of_find_property(pdn, "ibm,dma-window-saved", NULL)) + copy_property(pdn, "ibm,dma-window", "ibm,dma-window-saved"); of_remove_property(pdn, default_win); dev_info(&dev->dev, "Removed default DMA window for %pOF\n", pdn); } @@ -1539,7 +1757,7 @@ out_remove_win: __remove_dma_window(pdn, ddw_avail, create.liobn); out_failed: - if (default_win_removed) + if (default_win_removed || limited_addr_enabled) reset_dma_window(dev, pdn); fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); @@ -1551,17 +1769,84 @@ out_failed: out_unlock: mutex_unlock(&dma_win_init_mutex); - /* - * If we have persistent memory and the window size is only as big - * as RAM, then we failed to create a window to cover persistent - * memory and need to set the DMA limit. + /* If we have persistent memory and the window size is not big enough + * to directly map both RAM and vPMEM, then we need to set DMA limit. 
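+	 *
+	 * As an illustrative example (the sizes are assumed, not queried):
+	 * with max_ram_len = 36 (64GB of RAM) and a direct-mapped window
+	 * that could not also cover vPMEM, nothing above the directly
+	 * mapped region is backed by TCEs, so the limit becomes:
+	 *
+	 *	dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset +
+	 *				 (1ULL << 36);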
*/ - if (pmem_present && direct_mapping && len == max_ram_len) - dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len); + if (pmem_present && direct_mapping && len != MAX_PHYSMEM_BITS) + dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + + (1ULL << max_ram_len); + + dev_info(&dev->dev, "lsa_required: %x, lsa_enabled: %x, direct mapping: %x\n", + limited_addr_req, limited_addr_enabled, direct_mapping); return direct_mapping; } +static __u64 query_page_size_to_mask(u32 query_page_size) +{ + const long shift[] = { + (SZ_4K), (SZ_64K), (SZ_16M), + (SZ_32M), (SZ_64M), (SZ_128M), + (SZ_256M), (SZ_16G), (SZ_2M) + }; + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(shift); i++) { + if (query_page_size & (1 << i)) + ret |= shift[i]; + } + + return ret; +} + +static void spapr_tce_init_table_group(struct pci_dev *pdev, + struct device_node *pdn, + struct dynamic_dma_window_prop prop) +{ + struct iommu_table_group *table_group = PCI_DN(pdn)->table_group; + u32 ddw_avail[DDW_APPLICABLE_SIZE]; + + struct ddw_query_response query; + int ret; + + /* Only for normal boot with default window. Doesn't matter during + * kdump, since these will not be used during kdump. + */ + if (is_kdump_kernel()) + return; + + if (table_group->max_dynamic_windows_supported != 0) + return; /* already initialized */ + + table_group->tce32_start = be64_to_cpu(prop.dma_base); + table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift); + + if (!of_find_property(pdn, "ibm,dma-window", NULL)) + dev_err(&pdev->dev, "default dma window missing!\n"); + + ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable", + &ddw_avail[0], DDW_APPLICABLE_SIZE); + if (ret) { + table_group->max_dynamic_windows_supported = -1; + return; + } + + ret = query_ddw(pdev, ddw_avail, &query, pdn); + if (ret) { + dev_err(&pdev->dev, "%s: query_ddw failed\n", __func__); + table_group->max_dynamic_windows_supported = -1; + return; + } + + if (query.windows_available == 0) + table_group->max_dynamic_windows_supported = 1; + else + table_group->max_dynamic_windows_supported = IOMMU_TABLE_GROUP_MAX_TABLES; + + table_group->max_levels = 1; + table_group->pgsizes |= query_page_size_to_mask(query.page_size); +} + static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) { struct device_node *pdn, *dn; @@ -1601,13 +1886,6 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) be32_to_cpu(prop.tce_shift), NULL, &iommu_table_lpar_multi_ops); - /* Only for normal boot with default window. Doesn't matter even - * if we set these with DDW which is 64bit during kdump, since - * these will not be used during kdump. - */ - pci->table_group->tce32_start = be64_to_cpu(prop.dma_base); - pci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift); - iommu_init_table(tbl, pci->phb->node, 0, 0); iommu_register_group(pci->table_group, pci_domain_nr(pci->phb->bus), 0); @@ -1616,6 +1894,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) pr_debug(" found DMA window, table: %p\n", pci->table_group); } + spapr_tce_init_table_group(dev, pdn, prop); + set_iommu_table_base(&dev->dev, pci->table_group->tables[0]); iommu_add_device(pci->table_group, &dev->dev); } @@ -1624,8 +1904,11 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask) { struct device_node *dn = pci_device_to_OF_node(pdev), *pdn; - /* only attempt to use a new window if 64-bit DMA is requested */ - if (dma_mask < DMA_BIT_MASK(64)) + /* For DDW, DMA mask should be more than 32-bits. 
For masks above 32 bits but below
+	 * 64 bits, DMA addressing is supported in Limited Addressing mode.
+	 */
+	if (dma_mask <= DMA_BIT_MASK(32))
 		return false;
 	dev_dbg(&pdev->dev, "node is %pOF\n", dn);
@@ -1638,11 +1921,501 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
 	 */
 	pdn = pci_dma_find(dn, NULL);
 	if (pdn && PCI_DN(pdn))
-		return enable_ddw(pdev, pdn);
+		return enable_ddw(pdev, pdn, dma_mask);
 	return false;
 }
+#ifdef CONFIG_IOMMU_API
+/*
+ * A simple iommu_table_group_ops which only allows reusing the existing
+ * iommu_table. This handles VFIO for POWER7 or the nested KVM.
+ * The ops do not allow creating windows; they only allow reusing the
+ * existing one if it matches table_group->tce32_start/tce32_size/page_shift.
+ */
+static unsigned long spapr_tce_get_table_size(__u32 page_shift,
+					      __u64 window_size, __u32 levels)
+{
+	unsigned long size;
+
+	if (levels > 1)
+		return ~0U;
+	/* One 8-byte TCE per IOMMU page; e.g. a 2GB window with 4K pages
+	 * needs (1ULL << 31) >> (12 - 3) = 4MB of table.
+	 */
+	size = window_size >> (page_shift - 3);
+	return size;
+}
+
+static struct pci_dev *iommu_group_get_first_pci_dev(struct iommu_group *group)
+{
+	struct pci_dev *pdev = NULL;
+	int ret;
+
+	/* No IOMMU group? */
+	if (!group)
+		return NULL;
+
+	ret = iommu_group_for_each_dev(group, &pdev, dev_has_iommu_table);
+	if (!ret || !pdev)
+		return NULL;
+	return pdev;
+}
+
+static void restore_default_dma_window(struct pci_dev *pdev, struct device_node *pdn)
+{
+	reset_dma_window(pdev, pdn);
+	copy_property(pdn, "ibm,dma-window-saved", "ibm,dma-window");
+}
+
+static long remove_dynamic_dma_windows(struct pci_dev *pdev, struct device_node *pdn)
+{
+	struct pci_dn *pci = PCI_DN(pdn);
+	struct dma_win *window;
+	bool direct_mapping;
+	int len;
+
+	if (find_existing_ddw(pdn, &pdev->dev.archdata.dma_offset, &len, &direct_mapping)) {
+		remove_dma_window_named(pdn, true, direct_mapping ?
+					DIRECT64_PROPNAME : DMA64_PROPNAME, true);
+		if (!direct_mapping) {
+			WARN_ON(!pci->table_group->tables[0] && !pci->table_group->tables[1]);
+
+			if (pci->table_group->tables[1]) {
+				iommu_tce_table_put(pci->table_group->tables[1]);
+				pci->table_group->tables[1] = NULL;
+			} else if (pci->table_group->tables[0]) {
+				/* Default window was removed and only the DDW exists */
+				iommu_tce_table_put(pci->table_group->tables[0]);
+				pci->table_group->tables[0] = NULL;
+			}
+		}
+		spin_lock(&dma_win_list_lock);
+		list_for_each_entry(window, &dma_win_list, list) {
+			if (window->device == pdn) {
+				list_del(&window->list);
+				kfree(window);
+				break;
+			}
+		}
+		spin_unlock(&dma_win_list_lock);
+	}
+
+	return 0;
+}
+
+static long pseries_setup_default_iommu_config(struct iommu_table_group *table_group,
+					       struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	const __be32 *default_prop;
+	long liobn, offset, size;
+	struct device_node *pdn;
+	struct iommu_table *tbl;
+	struct pci_dn *pci;
+
+	pdn = pci_dma_find_parent_node(pdev, table_group);
+	if (!pdn || !PCI_DN(pdn)) {
+		dev_warn(&pdev->dev, "No table_group configured for the node %pOF\n", pdn);
+		return -1;
+	}
+	pci = PCI_DN(pdn);
+
+	/* The default window is restored if not already present on removal
+	 * of the DDW. However, when used by the VFIO SPAPR sub driver, the
+	 * user's order of window removal may differ and not trigger the auto
+	 * restoration, e.g. when the DDW is removed first, followed by the
+	 * default one. So, restore the default window with an explicit
+	 * reset-pe-dma call.
+	 */
+	restore_default_dma_window(pdev, pdn);
+
+	default_prop = of_get_property(pdn, "ibm,dma-window", NULL);
+	of_parse_dma_window(pdn, default_prop, &liobn, &offset, &size);
+	tbl = iommu_pseries_alloc_table(pci->phb->node);
+	if (!tbl) {
+		dev_err(&pdev->dev, "couldn't create new IOMMU table\n");
+		return -1;
+	}
+
+	iommu_table_setparms_common(tbl, pci->phb->bus->number, liobn, offset,
+				    size, IOMMU_PAGE_SHIFT_4K, NULL,
+				    &iommu_table_lpar_multi_ops);
+	iommu_init_table(tbl, pci->phb->node, 0, 0);
+
+	pci->table_group->tables[0] = tbl;
+	set_iommu_table_base(&pdev->dev, tbl);
+
+	return 0;
+}
+
+static bool is_default_window_request(struct iommu_table_group *table_group, __u32 page_shift,
+				      __u64 window_size)
+{
+	if ((window_size <= table_group->tce32_size) &&
+	    (page_shift == IOMMU_PAGE_SHIFT_4K))
+		return true;
+
+	return false;
+}
+
+static long spapr_tce_create_table(struct iommu_table_group *table_group, int num,
+				   __u32 page_shift, __u64 window_size, __u32 levels,
+				   struct iommu_table **ptbl)
+{
+	struct pci_dev *pdev = iommu_group_get_first_pci_dev(table_group->group);
+	u32 ddw_avail[DDW_APPLICABLE_SIZE];
+	struct ddw_create_response create;
+	unsigned long liobn, offset, size;
+	unsigned long start = 0, end = 0;
+	struct ddw_query_response query;
+	const __be32 *default_prop;
+	struct failed_ddw_pdn *fpdn;
+	unsigned int window_shift;
+	struct device_node *pdn;
+	struct iommu_table *tbl;
+	struct dma_win *window;
+	struct property *win64;
+	struct pci_dn *pci;
+	u64 win_addr;
+	int len, i;
+	long ret;
+
+	if (!is_power_of_2(window_size) || levels > 1)
+		return -EINVAL;
+
+	window_shift = order_base_2(window_size);
+
+	mutex_lock(&dma_win_init_mutex);
+
+	ret = -ENODEV;
+
+	pdn = pci_dma_find_parent_node(pdev, table_group);
+	if (!pdn || !PCI_DN(pdn)) { /* Neither 32-bit nor 64-bit window exists! */
+		dev_warn(&pdev->dev, "No dma-windows exist for the node %pOF\n", pdn);
+		goto out_failed;
+	}
+	pci = PCI_DN(pdn);
+
+	/* If enabling DDW has already failed for this pdn, don't retry! */
+	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
+		if (fpdn->pdn == pdn) {
+			dev_info(&pdev->dev, "%pOF in failed DDW device list\n", pdn);
+			goto out_unlock;
+		}
+	}
+
+	tbl = iommu_pseries_alloc_table(pci->phb->node);
+	if (!tbl) {
+		dev_dbg(&pdev->dev, "couldn't create new IOMMU table\n");
+		goto out_unlock;
+	}
+
+	if (num == 0) {
+		bool direct_mapping;
+		/* If the request is not for the default window, ensure there
+		 * is no DDW window already.
+		 */
+		if (!is_default_window_request(table_group, page_shift, window_size)) {
+			if (find_existing_ddw(pdn, &pdev->dev.archdata.dma_offset, &len,
+					      &direct_mapping)) {
+				dev_warn(&pdev->dev, "%pOF: 64-bit window already present.", pdn);
+				ret = -EPERM;
+				goto out_unlock;
+			}
+		} else {
+			/* The request is for the default window; ensure there
+			 * is no DDW if a reset is needed, since reset-pe would
+			 * otherwise remove the DDW as well.
+			 */
+			default_prop = of_get_property(pdn, "ibm,dma-window", NULL);
+			if (!default_prop) {
+				if (find_existing_ddw(pdn, &pdev->dev.archdata.dma_offset, &len,
+						      &direct_mapping)) {
+					dev_warn(&pdev->dev, "%pOF: Attempt to create window#0 when 64-bit window is present.
Preventing the attempt as that would destroy the 64-bit window", + pdn); + ret = -EPERM; + goto out_unlock; + } + + restore_default_dma_window(pdev, pdn); + + default_prop = of_get_property(pdn, "ibm,dma-window", NULL); + of_parse_dma_window(pdn, default_prop, &liobn, &offset, &size); + /* Limit the default window size to window_size */ + iommu_table_setparms_common(tbl, pci->phb->bus->number, liobn, + offset, 1UL << window_shift, + IOMMU_PAGE_SHIFT_4K, NULL, + &iommu_table_lpar_multi_ops); + iommu_init_table(tbl, pci->phb->node, + start >> IOMMU_PAGE_SHIFT_4K, + end >> IOMMU_PAGE_SHIFT_4K); + + table_group->tables[0] = tbl; + + mutex_unlock(&dma_win_init_mutex); + + goto exit; + } + } + } + + ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable", + &ddw_avail[0], DDW_APPLICABLE_SIZE); + if (ret) { + dev_info(&pdev->dev, "ibm,ddw-applicable not found\n"); + goto out_failed; + } + ret = -ENODEV; + + pr_err("%s: Calling query %pOF\n", __func__, pdn); + ret = query_ddw(pdev, ddw_avail, &query, pdn); + if (ret) + goto out_failed; + ret = -ENODEV; + + len = window_shift; + if (query.largest_available_block < (1ULL << (len - page_shift))) { + dev_dbg(&pdev->dev, "can't map window 0x%llx with %llu %llu-sized pages\n", + 1ULL << len, query.largest_available_block, + 1ULL << page_shift); + ret = -EINVAL; /* Retry with smaller window size */ + goto out_unlock; + } + + if (create_ddw(pdev, ddw_avail, &create, page_shift, len)) { + pr_err("%s: Create ddw failed %pOF\n", __func__, pdn); + goto out_failed; + } + + win_addr = ((u64)create.addr_hi << 32) | create.addr_lo; + win64 = ddw_property_create(DMA64_PROPNAME, create.liobn, win_addr, page_shift, len); + if (!win64) + goto remove_window; + + ret = of_add_property(pdn, win64); + if (ret) { + dev_err(&pdev->dev, "unable to add DMA window property for %pOF: %ld", pdn, ret); + goto free_property; + } + ret = -ENODEV; + + window = ddw_list_new_entry(pdn, win64->value); + if (!window) + goto remove_property; + + window->direct = false; + + for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) { + const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM; + + /* Look for MMIO32 */ + if ((pci->phb->mem_resources[i].flags & mask) == IORESOURCE_MEM) { + start = pci->phb->mem_resources[i].start; + end = pci->phb->mem_resources[i].end; + break; + } + } + + /* New table for using DDW instead of the default DMA window */ + iommu_table_setparms_common(tbl, pci->phb->bus->number, create.liobn, win_addr, + 1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops); + iommu_init_table(tbl, pci->phb->node, start >> page_shift, end >> page_shift); + + pci->table_group->tables[num] = tbl; + set_iommu_table_base(&pdev->dev, tbl); + pdev->dev.archdata.dma_offset = win_addr; + + spin_lock(&dma_win_list_lock); + list_add(&window->list, &dma_win_list); + spin_unlock(&dma_win_list_lock); + + mutex_unlock(&dma_win_init_mutex); + + goto exit; + +remove_property: + of_remove_property(pdn, win64); +free_property: + kfree(win64->name); + kfree(win64->value); + kfree(win64); +remove_window: + __remove_dma_window(pdn, ddw_avail, create.liobn); + +out_failed: + fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); + if (!fpdn) + goto out_unlock; + fpdn->pdn = pdn; + list_add(&fpdn->list, &failed_ddw_pdn_list); + +out_unlock: + mutex_unlock(&dma_win_init_mutex); + + return ret; +exit: + /* Allocate the userspace view */ + pseries_tce_iommu_userspace_view_alloc(tbl); + tbl->it_allocated_size = spapr_tce_get_table_size(page_shift, window_size, levels); + + *ptbl = 
iommu_tce_table_get(tbl);
+
+	return 0;
+}
+
+static bool is_default_window_table(struct iommu_table_group *table_group, struct iommu_table *tbl)
+{
+	if (((tbl->it_size << tbl->it_page_shift) <= table_group->tce32_size) &&
+	    (tbl->it_page_shift == IOMMU_PAGE_SHIFT_4K))
+		return true;
+
+	return false;
+}
+
+static long spapr_tce_set_window(struct iommu_table_group *table_group,
+				 int num, struct iommu_table *tbl)
+{
+	return tbl == table_group->tables[num] ? 0 : -EPERM;
+}
+
+static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num)
+{
+	struct pci_dev *pdev = iommu_group_get_first_pci_dev(table_group->group);
+	struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
+	struct iommu_table *tbl = table_group->tables[num];
+	struct failed_ddw_pdn *fpdn;
+	struct dma_win *window;
+	const char *win_name;
+	int ret = -ENODEV;
+
+	if (!tbl) /* The table was never created OR the window was never opened */
+		return 0;
+
+	mutex_lock(&dma_win_init_mutex);
+
+	if ((num == 0) && is_default_window_table(table_group, tbl))
+		win_name = "ibm,dma-window";
+	else
+		win_name = DMA64_PROPNAME;
+
+	pdn = pci_dma_find(dn, NULL);
+	if (!pdn || !PCI_DN(pdn)) { /* Neither 32-bit nor 64-bit window exists! */
+		dev_warn(&pdev->dev, "No dma-windows exist for the node %pOF\n", pdn);
+		goto out_failed;
+	}
+
+	/* Don't clear the TCEs; the user should have done that already. */
+	if (remove_dma_window_named(pdn, true, win_name, false)) {
+		pr_err("%s: The existing DDW removal failed for node %pOF\n", __func__, pdn);
+		goto out_failed; /* Could not remove it either! */
+	}
+
+	if (strcmp(win_name, DMA64_PROPNAME) == 0) {
+		spin_lock(&dma_win_list_lock);
+		list_for_each_entry(window, &dma_win_list, list) {
+			if (window->device == pdn) {
+				list_del(&window->list);
+				kfree(window);
+				break;
+			}
+		}
+		spin_unlock(&dma_win_list_lock);
+	}
+
+	iommu_tce_table_put(table_group->tables[num]);
+	table_group->tables[num] = NULL;
+
+	ret = 0;
+
+	goto out_unlock;
+
+out_failed:
+	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
+	if (!fpdn)
+		goto out_unlock;
+	fpdn->pdn = pdn;
+	list_add(&fpdn->list, &failed_ddw_pdn_list);
+
+out_unlock:
+	mutex_unlock(&dma_win_init_mutex);
+
+	return ret;
+}
+
+static long spapr_tce_take_ownership(struct iommu_table_group *table_group, struct device *dev)
+{
+	struct iommu_table *tbl = table_group->tables[0];
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct device_node *dn = pci_device_to_OF_node(pdev);
+	struct device_node *pdn;
+
+	/* SR-IOV VFs using a direct map by the host driver, OR multifunction
+	 * devices where the ownership was taken on the attempt by the first
+	 * function.
+	 */
+	if (!tbl && (table_group->max_dynamic_windows_supported != 1))
+		return 0;
+
+	mutex_lock(&dma_win_init_mutex);
+
+	pdn = pci_dma_find(dn, NULL);
+	if (!pdn || !PCI_DN(pdn)) { /* Neither 32-bit nor 64-bit window exists! */
+		dev_warn(&pdev->dev, "No dma-windows exist for the node %pOF\n", pdn);
+		mutex_unlock(&dma_win_init_mutex);
+		return -1;
+	}
+
+	/*
+	 * Though the RTAS call reset-pe removes the DDW, it doesn't clear the
+	 * entries in the table, if there are any. In case of direct map, the
+	 * entries will be left over, which is fine for PEs with 2 DMA windows
+	 * where the second window is created with create-pe, at which point
+	 * the table is cleared. However, on VFs having only one DMA window,
+	 * the default window would end up seeing the entries left over from
+	 * the direct map done on the second window. So, remove the DDW
+	 * explicitly so that clean_dma_window() cleans up the entries, if any.
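+	 *
+	 * As a rough sketch of the resulting ownership flow (names refer to
+	 * the ops defined here; this is illustrative, not a normative trace):
+	 *
+	 *	spapr_tce_take_ownership()    - DDW and default window removed,
+	 *					tables[0] = NULL
+	 *	spapr_tce_create_table()      - user space creates its window(s)
+	 *	spapr_tce_release_ownership() - default window restored via
+	 *					pseries_setup_default_iommu_config()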
+ */ + if (remove_dynamic_dma_windows(pdev, pdn)) { + dev_warn(&pdev->dev, "The existing DDW removal failed for node %pOF\n", pdn); + mutex_unlock(&dma_win_init_mutex); + return -1; + } + + /* The table_group->tables[0] is not null now, it must be the default window + * Remove it, let the userspace create it as it needs. + */ + if (table_group->tables[0]) { + remove_dma_window_named(pdn, true, "ibm,dma-window", true); + iommu_tce_table_put(tbl); + table_group->tables[0] = NULL; + } + set_iommu_table_base(dev, NULL); + + mutex_unlock(&dma_win_init_mutex); + + return 0; +} + +static void spapr_tce_release_ownership(struct iommu_table_group *table_group, struct device *dev) +{ + struct iommu_table *tbl = table_group->tables[0]; + + if (tbl) { /* Default window already restored */ + return; + } + + mutex_lock(&dma_win_init_mutex); + + /* Restore the default window */ + pseries_setup_default_iommu_config(table_group, dev); + + mutex_unlock(&dma_win_init_mutex); + + return; +} + +static struct iommu_table_group_ops spapr_tce_table_group_ops = { + .get_table_size = spapr_tce_get_table_size, + .create_table = spapr_tce_create_table, + .set_window = spapr_tce_set_window, + .unset_window = spapr_tce_unset_window, + .take_ownership = spapr_tce_take_ownership, + .release_ownership = spapr_tce_release_ownership, +}; +#endif + static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -1650,11 +2423,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, struct memory_notify *arg = data; int ret = 0; + /* This notifier can get called when onlining persistent memory as well. + * TCEs are not pre-mapped for persistent memory. Persistent memory will + * always be above ddw_memory_hotplug_max() + */ + switch (action) { case MEM_GOING_ONLINE: spin_lock(&dma_win_list_lock); list_for_each_entry(window, &dma_win_list, list) { - if (window->direct) { + if (window->direct && (arg->start_pfn << PAGE_SHIFT) < + ddw_memory_hotplug_max()) { ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn, arg->nr_pages, window->prop); } @@ -1666,7 +2445,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, case MEM_OFFLINE: spin_lock(&dma_win_list_lock); list_for_each_entry(window, &dma_win_list, list) { - if (window->direct) { + if (window->direct && (arg->start_pfn << PAGE_SHIFT) < + ddw_memory_hotplug_max()) { ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, arg->nr_pages, window->prop); } @@ -1704,8 +2484,8 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti * we have to remove the property when releasing * the device node. 
*/ - if (remove_ddw(np, false, DIRECT64_PROPNAME)) - remove_ddw(np, false, DMA64_PROPNAME); + if (remove_dma_window_named(np, false, DIRECT64_PROPNAME, true)) + remove_dma_window_named(np, false, DMA64_PROPNAME, true); if (pci && pci->table_group) iommu_pseries_free_group(pci->table_group, diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 096d09ed89f6..431be156ca9b 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c @@ -61,11 +61,3 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary) } else xics_kexec_teardown_cpu(secondary); } - -void pseries_machine_kexec(struct kimage *image) -{ - if (firmware_has_feature(FW_FEATURE_SET_MODE)) - pseries_disable_reloc_on_exc(); - - default_machine_kexec(image); -} diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 4e9916bb03d7..6a415febc53b 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -16,6 +16,7 @@ #include <linux/export.h> #include <linux/jump_label.h> #include <linux/delay.h> +#include <linux/seq_file.h> #include <linux/stop_machine.h> #include <linux/spinlock.h> #include <linux/cpuhotplug.h> @@ -169,7 +170,7 @@ struct vcpu_dispatch_data { */ #define NR_CPUS_H NR_CPUS -DEFINE_RWLOCK(dtl_access_lock); +DECLARE_RWSEM(dtl_access_lock); static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data); static DEFINE_PER_CPU(u64, dtl_entry_ridx); static DEFINE_PER_CPU(struct dtl_worker, dtl_workers); @@ -463,7 +464,7 @@ static int dtl_worker_enable(unsigned long *time_limit) { int rc = 0, state; - if (!write_trylock(&dtl_access_lock)) { + if (!down_write_trylock(&dtl_access_lock)) { rc = -EBUSY; goto out; } @@ -479,7 +480,7 @@ static int dtl_worker_enable(unsigned long *time_limit) pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n"); free_dtl_buffers(time_limit); reset_global_dtl_mask(); - write_unlock(&dtl_access_lock); + up_write(&dtl_access_lock); rc = -EINVAL; goto out; } @@ -494,7 +495,7 @@ static void dtl_worker_disable(unsigned long *time_limit) cpuhp_remove_state(dtl_worker_state); free_dtl_buffers(time_limit); reset_global_dtl_mask(); - write_unlock(&dtl_access_lock); + up_write(&dtl_access_lock); } static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p, @@ -1886,10 +1887,10 @@ out: * h_get_mpp * H_GET_MPP hcall returns info in 7 parms */ -int h_get_mpp(struct hvcall_mpp_data *mpp_data) +long h_get_mpp(struct hvcall_mpp_data *mpp_data) { - int rc; - unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; + long rc; rc = plpar_hcall9(H_GET_MPP, retbuf); diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index f73c4d1c26af..cc22924f159f 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -29,7 +29,6 @@ #include <asm/firmware.h> #include <asm/rtas.h> #include <asm/time.h> -#include <asm/vdso_datapage.h> #include <asm/vio.h> #include <asm/mmu.h> #include <asm/machdep.h> @@ -113,8 +112,8 @@ struct hvcall_ppp_data { */ static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data) { - unsigned long rc; - unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; + long rc; rc = plpar_hcall9(H_GET_PPP, retbuf); @@ -170,20 +169,24 @@ out: kfree(buf); } -static unsigned h_pic(unsigned long *pool_idle_time, - unsigned long *num_procs) 
+static long h_pic(unsigned long *pool_idle_time,
+		  unsigned long *num_procs)
 {
-	unsigned long rc;
-	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = {0};
 	rc = plpar_hcall(H_PIC, retbuf);
-	*pool_idle_time = retbuf[0];
-	*num_procs = retbuf[1];
+	if (pool_idle_time)
+		*pool_idle_time = retbuf[0];
+	if (num_procs)
+		*num_procs = retbuf[1];
 	return rc;
 }
+unsigned long boot_pool_idle_time;
+
 /*
  * parse_ppp_data
  * Parse out the data returned from h_get_ppp and h_pic
@@ -193,7 +196,7 @@ static void parse_ppp_data(struct seq_file *m)
 	struct hvcall_ppp_data ppp_data;
 	struct device_node *root;
 	const __be32 *perf_level;
-	int rc;
+	long rc;
 	rc = h_get_ppp(&ppp_data);
 	if (rc)
@@ -215,9 +218,15 @@ static void parse_ppp_data(struct seq_file *m)
 		seq_printf(m, "pool_capacity=%d\n",
 			   ppp_data.active_procs_in_pool * 100);
-		h_pic(&pool_idle_time, &pool_procs);
-		seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
-		seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
+		/* If the h_pic call is unsuccessful, the APP values reported
+		 * by tools like lparstat would be wrong.
+		 */
+		if (h_pic(&pool_idle_time, &pool_procs) == H_SUCCESS) {
+			seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
+			seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
+			seq_printf(m, "boot_pool_idle_time=%ld\n", boot_pool_idle_time);
+		}
 	}
 	seq_printf(m, "unallocated_capacity_weight=%d\n",
@@ -361,8 +370,8 @@ static int read_dt_lpar_name(struct seq_file *m)
 static void read_lpar_name(struct seq_file *m)
 {
-	if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
-		pr_err_once("Error can't get the LPAR name");
+	if (read_rtas_lpar_name(m))
+		read_dt_lpar_name(m);
 }
 #define SPLPAR_MAXLENGTH 1026*(sizeof(char))
@@ -520,7 +529,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	lrdrp = of_get_property(rtas_node, "ibm,lrdr-capacity", NULL);
 	if (lrdrp == NULL) {
-		partition_potential_processors = vdso_data->processorCount;
+		partition_potential_processors = num_possible_cpus();
 	} else {
 		partition_potential_processors = be32_to_cpup(lrdrp + 4);
 	}
@@ -543,7 +552,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	} else {		/* non SPLPAR case */
 		seq_printf(m, "system_active_processors=%d\n",
-			   partition_potential_processors);
+			   partition_active_processors);
 		seq_printf(m, "system_potential_processors=%d\n",
 			   partition_potential_processors);
@@ -792,6 +801,7 @@ static const struct proc_ops lparcfg_proc_ops = {
 static int __init lparcfg_init(void)
 {
 	umode_t mode = 0444;
+	long retval;
 	/* Allow writing if we have FW_FEATURE_SPLPAR */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR))
@@ -801,6 +811,16 @@ static int __init lparcfg_init(void)
 		printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
 		return -EIO;
 	}
+
+	/* If this call fails, the APP values would be wrong in the since-boot
+	 * reports of lparstat.
+	 */
+	retval = h_pic(&boot_pool_idle_time, NULL);
+
+	if (retval != H_SUCCESS)
+		pr_debug("H_PIC failed during lparcfg init retval: %ld\n",
+			 retval);
+
 	return 0;
 }
 machine_device_initcall(pseries, lparcfg_init);
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 1798f0f14d58..62bd8e2d5d4c 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -53,7 +53,7 @@ struct update_props_workarea {
 static unsigned int nmi_wd_lpm_factor = 200;
 #ifdef CONFIG_SYSCTL
-static struct ctl_table nmi_wd_lpm_factor_ctl_table[] = {
+static const struct ctl_table
nmi_wd_lpm_factor_ctl_table[] = { { .procname = "nmi_wd_lpm_factor", .data = &nmi_wd_lpm_factor, diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 6dfb55b52d36..ee1c8c6898a3 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -9,6 +9,7 @@ #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/msi.h> +#include <linux/seq_file.h> #include <asm/rtas.h> #include <asm/hw_irq.h> @@ -524,7 +525,12 @@ static struct msi_domain_info pseries_msi_domain_info = { static void pseries_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { - __pci_read_msi_msg(irq_data_get_msi_desc(data), msg); + struct pci_dev *dev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + + if (dev->current_state == PCI_D0) + __pci_read_msi_msg(irq_data_get_msi_desc(data), msg); + else + get_cached_msi_msg(data->irq, msg); } static struct irq_chip pseries_msi_irq_chip = { @@ -610,7 +616,7 @@ static const struct irq_domain_ops pseries_irq_domain_ops = { static int __pseries_msi_allocate_domains(struct pci_controller *phb, unsigned int count) { - struct irq_domain *parent = irq_get_default_host(); + struct irq_domain *parent = irq_get_default_domain(); phb->fwnode = irq_domain_alloc_named_id_fwnode("pSeries-MSI", phb->global_number); @@ -627,7 +633,7 @@ static int __pseries_msi_allocate_domains(struct pci_controller *phb, return -ENOMEM; } - phb->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(phb->dn), + phb->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(phb->dn), &pseries_msi_domain_info, phb->dev_domain); if (!phb->msi_domain) { diff --git a/arch/powerpc/platforms/pseries/papr-indices.c b/arch/powerpc/platforms/pseries/papr-indices.c new file mode 100644 index 000000000000..3c7545591c45 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-indices.c @@ -0,0 +1,488 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-indices: " fmt + +#include <linux/build_bug.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/lockdep.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/string_helpers.h> +#include <linux/uaccess.h> +#include <asm/machdep.h> +#include <asm/rtas-work-area.h> +#include <asm/rtas.h> +#include <uapi/asm/papr-indices.h> +#include "papr-rtas-common.h" + +/* + * Function-specific return values for ibm,set-dynamic-indicator and + * ibm,get-dynamic-sensor-state RTAS calls. + * PAPR+ v2.13 7.3.18 and 7.3.19. + */ +#define RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR -3 + +/** + * struct rtas_get_indices_params - Parameters (in and out) for + * ibm,get-indices. + * @is_sensor: In: Caller-provided whether sensor or indicator. + * @indice_type:In: Caller-provided indice (sensor or indicator) token + * @work_area: In: Caller-provided work area buffer for results. + * @next: In: Sequence number. Out: Next sequence number. + * @status: Out: RTAS call status. + */ +struct rtas_get_indices_params { + u8 is_sensor; + u32 indice_type; + struct rtas_work_area *work_area; + u32 next; + s32 status; +}; + +/* + * rtas_ibm_get_indices() - Call ibm,get-indices to fill a work area buffer. + * @params: See &struct rtas_ibm_get_indices_params. + * + * Calls ibm,get-indices until it errors or successfully deposits data + * into the supplied work area. Handles RTAS retry statuses. Maps RTAS + * error statuses to reasonable errno values. 
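+ *
+ * A typical caller loop looks roughly like the sketch below (ignoring
+ * the -EAGAIN restart case; consume_buf() is a hypothetical placeholder
+ * for the caller's handling of the returned data):
+ *
+ *	do {
+ *		ret = rtas_ibm_get_indices(&params);
+ *		if (ret)
+ *			break;
+ *		consume_buf(rtas_work_area_raw_buf(params.work_area));
+ *	} while (params.status == RTAS_SEQ_MORE_DATA);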
+ *
+ * The caller is expected to invoke rtas_ibm_get_indices() multiple times
+ * to retrieve all indices data for the provided indice type. Only one
+ * sequence should be in progress at any time; starting a new sequence
+ * will disrupt any sequence already in progress. Serialization of
+ * indices retrieval sequences is the responsibility of the caller.
+ *
+ * The caller should inspect @params.status to determine whether more
+ * calls are needed to complete the sequence.
+ *
+ * Context: May sleep.
+ * Return: -ve on error, 0 otherwise.
+ */
+static int rtas_ibm_get_indices(struct rtas_get_indices_params *params)
+{
+	struct rtas_work_area *work_area = params->work_area;
+	const s32 token = rtas_function_token(RTAS_FN_IBM_GET_INDICES);
+	u32 rets;
+	s32 fwrc;
+	int ret;
+
+	if (token == RTAS_UNKNOWN_SERVICE)
+		return -ENOENT;
+
+	lockdep_assert_held(&rtas_ibm_get_indices_lock);
+
+	do {
+		fwrc = rtas_call(token, 5, 2, &rets, params->is_sensor,
+				 params->indice_type,
+				 rtas_work_area_phys(work_area),
+				 rtas_work_area_size(work_area),
+				 params->next);
+	} while (rtas_busy_delay(fwrc));
+
+	switch (fwrc) {
+	case RTAS_HARDWARE_ERROR:
+		ret = -EIO;
+		break;
+	case RTAS_INVALID_PARAMETER: /* Indicator type is not supported */
+		ret = -EINVAL;
+		break;
+	case RTAS_SEQ_START_OVER:
+		ret = -EAGAIN;
+		pr_info_ratelimited("Indices changed during retrieval, retrying\n");
+		params->next = 1;
+		break;
+	case RTAS_SEQ_MORE_DATA:
+		params->next = rets;
+		ret = 0;
+		break;
+	case RTAS_SEQ_COMPLETE:
+		params->next = 0;
+		ret = 0;
+		break;
+	default:
+		ret = -EIO;
+		pr_err_ratelimited("unexpected ibm,get-indices status %d\n", fwrc);
+		break;
+	}
+
+	params->status = fwrc;
+	return ret;
+}
+
+/*
+ * Internal indices sequence APIs. A sequence is a series of calls to
+ * ibm,get-indices for a given location code. The sequence ends when
+ * an error is encountered or all indices for the input have been
+ * returned.
+ */
+
+/*
+ * indices_sequence_begin() - Begin an indices retrieval sequence.
+ *
+ * Context: May sleep.
+ */
+static void indices_sequence_begin(struct papr_rtas_sequence *seq)
+{
+	struct rtas_get_indices_params *param;
+
+	param = (struct rtas_get_indices_params *)seq->params;
+	/*
+	 * We could allocate the work area before acquiring the
+	 * function lock, but that would allow concurrent requests to
+	 * exhaust the limited work area pool for no benefit. So
+	 * allocate the work area under the lock.
+	 */
+	mutex_lock(&rtas_ibm_get_indices_lock);
+	param->work_area = rtas_work_area_alloc(RTAS_GET_INDICES_BUF_SIZE);
+	param->next = 1;
+	param->status = 0;
+}
+
+/*
+ * indices_sequence_end() - Finalize an indices retrieval sequence.
+ *
+ * Releases resources obtained by indices_sequence_begin().
+ */
+static void indices_sequence_end(struct papr_rtas_sequence *seq)
+{
+	struct rtas_get_indices_params *param;
+
+	param = (struct rtas_get_indices_params *)seq->params;
+	rtas_work_area_free(param->work_area);
+	mutex_unlock(&rtas_ibm_get_indices_lock);
+}
+
+/*
+ * Work function to be passed to papr_rtas_blob_generate().
+ *
+ * The ibm,get-indices RTAS call fills the work area in a defined format
+ * but does not return the number of bytes written to the buffer. So,
+ * instead of the kernel parsing the work area to determine the buffer
+ * length, copy the complete work area (RTAS_GET_INDICES_BUF_SIZE) to
+ * the blob and let user space extract the data. This means
+ * RTAS_GET_INDICES_BUF_SIZE bytes are returned for each read().
+ */
+
+static const char *indices_sequence_fill_work_area(struct papr_rtas_sequence *seq,
+						   size_t *len)
+{
+	struct rtas_get_indices_params *p;
+	bool init_state;
+
+	p = (struct rtas_get_indices_params *)seq->params;
+	init_state = (p->next == 1) ? true : false;
+
+	if (papr_rtas_sequence_should_stop(seq, p->status, init_state))
+		return NULL;
+	if (papr_rtas_sequence_set_err(seq, rtas_ibm_get_indices(p)))
+		return NULL;
+
+	*len = RTAS_GET_INDICES_BUF_SIZE;
+	return rtas_work_area_raw_buf(p->work_area);
+}
+
+/*
+ * papr_indices_handle_read - returns indices blob data to user space
+ *
+ * The ibm,get-indices RTAS call fills the work area in a defined format
+ * but does not return the number of bytes written to the buffer, and
+ * RTAS_GET_INDICES_BUF_SIZE bytes are copied to the blob for each RTAS
+ * call. So send an RTAS_GET_INDICES_BUF_SIZE buffer to user space for
+ * each read().
+ */
+static ssize_t papr_indices_handle_read(struct file *file,
+					char __user *buf, size_t size, loff_t *off)
+{
+	const struct papr_rtas_blob *blob = file->private_data;
+
+	/* We should not instantiate a handle without any data attached. */
+	if (!papr_rtas_blob_has_data(blob)) {
+		pr_err_once("handle without data\n");
+		return -EIO;
+	}
+
+	if (size < RTAS_GET_INDICES_BUF_SIZE) {
+		pr_err_once("Invalid buffer length %ld, expect %d\n",
+			    size, RTAS_GET_INDICES_BUF_SIZE);
+		return -EINVAL;
+	} else if (size > RTAS_GET_INDICES_BUF_SIZE)
+		size = RTAS_GET_INDICES_BUF_SIZE;
+
+	return simple_read_from_buffer(buf, size, off, blob->data, blob->len);
+}
+
+static const struct file_operations papr_indices_handle_ops = {
+	.read = papr_indices_handle_read,
+	.llseek = papr_rtas_common_handle_seek,
+	.release = papr_rtas_common_handle_release,
+};
+
+/*
+ * papr_indices_create_handle() - Create a fd-based handle for reading
+ * indices data
+ * @ubuf: Input parameters to the RTAS call, i.e. whether sensor or
+ * indicator, and the indice type, in user memory
+ *
+ * Handler for the PAPR_INDICES_IOC_GET ioctl command. Validates @ubuf
+ * and instantiates an immutable indices "blob" for it. The blob is
+ * attached to a file descriptor for reading by user space. The memory
+ * backing the blob is freed when the file is released.
+ *
+ * All requested indices data is retrieved by this call, and all
+ * necessary RTAS interactions are performed before returning the fd
+ * to user space. This keeps the read handler simple and ensures that
+ * the kernel can prevent interleaving of ibm,get-indices call sequences.
+ *
+ * Return: The installed fd number if successful, -ve errno otherwise.
+ */
+static long papr_indices_create_handle(struct papr_indices_io_block __user *ubuf)
+{
+	struct papr_rtas_sequence seq = {};
+	struct rtas_get_indices_params params = {};
+	int fd;
+
+	if (get_user(params.is_sensor, &ubuf->indices.is_sensor))
+		return -EFAULT;
+
+	if (get_user(params.indice_type, &ubuf->indices.indice_type))
+		return -EFAULT;
+
+	seq = (struct papr_rtas_sequence) {
+		.begin = indices_sequence_begin,
+		.end = indices_sequence_end,
+		.work = indices_sequence_fill_work_area,
+	};
+
+	seq.params = &params;
+	fd = papr_rtas_setup_file_interface(&seq,
+			&papr_indices_handle_ops, "[papr-indices]");
+
+	return fd;
+}
+
+/*
+ * Create a work area with the input parameters. This function is used
+ * for both the ibm,set-dynamic-indicator and ibm,get-dynamic-sensor-state
+ * RTAS calls.
+ */ +static struct rtas_work_area * +papr_dynamic_indice_buf_from_user(struct papr_indices_io_block __user *ubuf, + struct papr_indices_io_block *kbuf) +{ + struct rtas_work_area *work_area; + u32 length; + __be32 len_be; + + if (copy_from_user(kbuf, ubuf, sizeof(*kbuf))) + return ERR_PTR(-EFAULT); + + + if (!string_is_terminated(kbuf->dynamic_param.location_code_str, + ARRAY_SIZE(kbuf->dynamic_param.location_code_str))) + return ERR_PTR(-EINVAL); + + /* + * The input data in the work area should be as follows: + * - 32-bit integer length of the location code string, + * including NULL. + * - Location code string, NULL terminated, identifying the + * token (sensor or indicator). + * PAPR 2.13 - R1–7.3.18–5 ibm,set-dynamic-indicator + * - R1–7.3.19–5 ibm,get-dynamic-sensor-state + */ + /* + * Length that user space passed should also include NULL + * terminator. + */ + length = strlen(kbuf->dynamic_param.location_code_str) + 1; + if (length > LOC_CODE_SIZE) + return ERR_PTR(-EINVAL); + + len_be = cpu_to_be32(length); + + work_area = rtas_work_area_alloc(LOC_CODE_SIZE + sizeof(u32)); + memcpy(rtas_work_area_raw_buf(work_area), &len_be, sizeof(u32)); + memcpy((rtas_work_area_raw_buf(work_area) + sizeof(u32)), + &kbuf->dynamic_param.location_code_str, length); + + return work_area; +} + +/** + * papr_dynamic_indicator_ioc_set - ibm,set-dynamic-indicator RTAS Call + * PAPR 2.13 7.3.18 + * + * @ubuf: Input parameters to RTAS call such as indicator token and + * new state. + * + * Returns success or -errno. + */ +static long papr_dynamic_indicator_ioc_set(struct papr_indices_io_block __user *ubuf) +{ + struct papr_indices_io_block kbuf; + struct rtas_work_area *work_area; + s32 fwrc, token, ret; + + token = rtas_function_token(RTAS_FN_IBM_SET_DYNAMIC_INDICATOR); + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + mutex_lock(&rtas_ibm_set_dynamic_indicator_lock); + work_area = papr_dynamic_indice_buf_from_user(ubuf, &kbuf); + if (IS_ERR(work_area)) { + ret = PTR_ERR(work_area); + goto out; + } + + do { + fwrc = rtas_call(token, 3, 1, NULL, + kbuf.dynamic_param.token, + kbuf.dynamic_param.state, + rtas_work_area_phys(work_area)); + } while (rtas_busy_delay(fwrc)); + + rtas_work_area_free(work_area); + + switch (fwrc) { + case RTAS_SUCCESS: + ret = 0; + break; + case RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR: /* No such indicator */ + ret = -EOPNOTSUPP; + break; + default: + pr_err("unexpected ibm,set-dynamic-indicator result %d\n", + fwrc); + fallthrough; + case RTAS_HARDWARE_ERROR: /* Hardware/platform error */ + ret = -EIO; + break; + } + +out: + mutex_unlock(&rtas_ibm_set_dynamic_indicator_lock); + return ret; +} + +/** + * papr_dynamic_sensor_ioc_get - ibm,get-dynamic-sensor-state RTAS Call + * PAPR 2.13 7.3.19 + * + * @ubuf: Input parameters to RTAS call such as sensor token + * Copies the state in user space buffer. + * + * + * Returns success or -errno. 
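+ *
+ * Illustrative user-space usage, where fd is an open /dev/papr-indices
+ * descriptor (the token and location code are hypothetical example
+ * values):
+ *
+ *	struct papr_indices_io_block io = {};
+ *
+ *	io.dynamic_param.token = 9001;
+ *	strcpy(io.dynamic_param.location_code_str, "U78AA.001.WZS0001-P1");
+ *	if (!ioctl(fd, PAPR_DYNAMIC_SENSOR_IOC_GET, &io))
+ *		state = io.dynamic_param.state;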
+ */ + +static long papr_dynamic_sensor_ioc_get(struct papr_indices_io_block __user *ubuf) +{ + struct papr_indices_io_block kbuf; + struct rtas_work_area *work_area; + s32 fwrc, token, ret; + u32 rets; + + token = rtas_function_token(RTAS_FN_IBM_GET_DYNAMIC_SENSOR_STATE); + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + mutex_lock(&rtas_ibm_get_dynamic_sensor_state_lock); + work_area = papr_dynamic_indice_buf_from_user(ubuf, &kbuf); + if (IS_ERR(work_area)) { + ret = PTR_ERR(work_area); + goto out; + } + + do { + fwrc = rtas_call(token, 2, 2, &rets, + kbuf.dynamic_param.token, + rtas_work_area_phys(work_area)); + } while (rtas_busy_delay(fwrc)); + + rtas_work_area_free(work_area); + + switch (fwrc) { + case RTAS_SUCCESS: + if (put_user(rets, &ubuf->dynamic_param.state)) + ret = -EFAULT; + else + ret = 0; + break; + case RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR: /* No such indicator */ + ret = -EOPNOTSUPP; + break; + default: + pr_err("unexpected ibm,get-dynamic-sensor result %d\n", + fwrc); + fallthrough; + case RTAS_HARDWARE_ERROR: /* Hardware/platform error */ + ret = -EIO; + break; + } + +out: + mutex_unlock(&rtas_ibm_get_dynamic_sensor_state_lock); + return ret; +} + +/* + * Top-level ioctl handler for /dev/papr-indices. + */ +static long papr_indices_dev_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) +{ + void __user *argp = (__force void __user *)arg; + long ret; + + switch (ioctl) { + case PAPR_INDICES_IOC_GET: + ret = papr_indices_create_handle(argp); + break; + case PAPR_DYNAMIC_SENSOR_IOC_GET: + ret = papr_dynamic_sensor_ioc_get(argp); + break; + case PAPR_DYNAMIC_INDICATOR_IOC_SET: + if (filp->f_mode & FMODE_WRITE) + ret = papr_dynamic_indicator_ioc_set(argp); + else + ret = -EBADF; + break; + default: + ret = -ENOIOCTLCMD; + break; + } + + return ret; +} + +static const struct file_operations papr_indices_ops = { + .unlocked_ioctl = papr_indices_dev_ioctl, +}; + +static struct miscdevice papr_indices_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "papr-indices", + .fops = &papr_indices_ops, +}; + +static __init int papr_indices_init(void) +{ + if (!rtas_function_implemented(RTAS_FN_IBM_GET_INDICES)) + return -ENODEV; + + if (!rtas_function_implemented(RTAS_FN_IBM_SET_DYNAMIC_INDICATOR)) + return -ENODEV; + + if (!rtas_function_implemented(RTAS_FN_IBM_GET_DYNAMIC_SENSOR_STATE)) + return -ENODEV; + + return misc_register(&papr_indices_dev); +} +machine_device_initcall(pseries, papr_indices_init); diff --git a/arch/powerpc/platforms/pseries/papr-phy-attest.c b/arch/powerpc/platforms/pseries/papr-phy-attest.c new file mode 100644 index 000000000000..1907f2411567 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-phy-attest.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-phy-attest: " fmt + +#include <linux/build_bug.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/lockdep.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/string_helpers.h> +#include <linux/uaccess.h> +#include <asm/machdep.h> +#include <asm/rtas-work-area.h> +#include <asm/rtas.h> +#include <uapi/asm/papr-physical-attestation.h> +#include "papr-rtas-common.h" + +/** + * struct rtas_phy_attest_params - Parameters (in and out) for + * ibm,physical-attestation. + * + * @cmd: In: Caller-provided attestation command buffer. Must be + * RTAS-addressable. 
+ * @work_area: In: Caller-provided work area buffer for attestation + * command structure + * Out: Caller-provided work area buffer for the response + * @cmd_len: In: Caller-provided attestation command structure + * length + * @sequence: In: Sequence number. Out: Next sequence number. + * @written: Out: Bytes written by ibm,physical-attestation to + * @work_area. + * @status: Out: RTAS call status. + */ +struct rtas_phy_attest_params { + struct papr_phy_attest_io_block cmd; + struct rtas_work_area *work_area; + u32 cmd_len; + u32 sequence; + u32 written; + s32 status; +}; + +/** + * rtas_physical_attestation() - Call ibm,physical-attestation to + * fill a work area buffer. + * @params: See &struct rtas_phy_attest_params. + * + * Calls ibm,physical-attestation until it errors or successfully + * deposits data into the supplied work area. Handles RTAS retry + * statuses. Maps RTAS error statuses to reasonable errno values. + * + * The caller is expected to invoke rtas_physical_attestation() + * multiple times to retrieve all the data for the provided + * attestation command. Only one sequence should be in progress at + * any time; starting a new sequence will disrupt any sequence + * already in progress. Serialization of attestation retrieval + * sequences is the responsibility of the caller. + * + * The caller should inspect @params.status to determine whether more + * calls are needed to complete the sequence. + * + * Context: May sleep. + * Return: -ve on error, 0 otherwise. + */ +static int rtas_physical_attestation(struct rtas_phy_attest_params *params) +{ + struct rtas_work_area *work_area; + s32 fwrc, token; + u32 rets[2]; + int ret; + + work_area = params->work_area; + token = rtas_function_token(RTAS_FN_IBM_PHYSICAL_ATTESTATION); + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + lockdep_assert_held(&rtas_ibm_physical_attestation_lock); + + do { + fwrc = rtas_call(token, 3, 3, rets, + rtas_work_area_phys(work_area), + params->cmd_len, + params->sequence); + } while (rtas_busy_delay(fwrc)); + + switch (fwrc) { + case RTAS_HARDWARE_ERROR: + ret = -EIO; + break; + case RTAS_INVALID_PARAMETER: + ret = -EINVAL; + break; + case RTAS_SEQ_MORE_DATA: + params->sequence = rets[0]; + fallthrough; + case RTAS_SEQ_COMPLETE: + params->written = rets[1]; + /* + * Kernel or firmware bug, do not continue. + */ + if (WARN(params->written > rtas_work_area_size(work_area), + "possible write beyond end of work area")) + ret = -EFAULT; + else + ret = 0; + break; + default: + ret = -EIO; + pr_err_ratelimited("unexpected ibm,get-phy_attest status %d\n", fwrc); + break; + } + + params->status = fwrc; + return ret; +} + +/* + * Internal physical-attestation sequence APIs. A physical-attestation + * sequence is a series of calls to get ibm,physical-attestation + * for a given attestation command. The sequence ends when an error + * is encountered or all data for the attestation command has been + * returned. + */ + +/** + * phy_attest_sequence_begin() - Begin a response data for attestation + * command retrieval sequence. + * @seq: user specified parameters for RTAS call from seq struct. + * + * Context: May sleep. + */ +static void phy_attest_sequence_begin(struct papr_rtas_sequence *seq) +{ + struct rtas_phy_attest_params *param; + + /* + * We could allocate the work area before acquiring the + * function lock, but that would allow concurrent requests to + * exhaust the limited work area pool for no benefit. So + * allocate the work area under the lock. 
+	 */
+	mutex_lock(&rtas_ibm_physical_attestation_lock);
+	param = (struct rtas_phy_attest_params *)seq->params;
+	param->work_area = rtas_work_area_alloc(SZ_4K);
+	memcpy(rtas_work_area_raw_buf(param->work_area), &param->cmd,
+	       param->cmd_len);
+	param->sequence = 1;
+	param->status = 0;
+}
+
+/**
+ * phy_attest_sequence_end() - Finalize an attestation command
+ * response retrieval sequence.
+ * @seq: Sequence state.
+ *
+ * Releases resources obtained by phy_attest_sequence_begin().
+ */
+static void phy_attest_sequence_end(struct papr_rtas_sequence *seq)
+{
+	struct rtas_phy_attest_params *param;
+
+	param = (struct rtas_phy_attest_params *)seq->params;
+	rtas_work_area_free(param->work_area);
+	mutex_unlock(&rtas_ibm_physical_attestation_lock);
+	kfree(param);
+}
+
+/*
+ * Generator function to be passed to papr_rtas_blob_generate().
+ */
+static const char *phy_attest_sequence_fill_work_area(struct papr_rtas_sequence *seq,
+						      size_t *len)
+{
+	struct rtas_phy_attest_params *p;
+	bool init_state;
+
+	p = (struct rtas_phy_attest_params *)seq->params;
+	init_state = (p->written == 0) ? true : false;
+
+	if (papr_rtas_sequence_should_stop(seq, p->status, init_state))
+		return NULL;
+	if (papr_rtas_sequence_set_err(seq, rtas_physical_attestation(p)))
+		return NULL;
+	*len = p->written;
+	return rtas_work_area_raw_buf(p->work_area);
+}
+
+static const struct file_operations papr_phy_attest_handle_ops = {
+	.read = papr_rtas_common_handle_read,
+	.llseek = papr_rtas_common_handle_seek,
+	.release = papr_rtas_common_handle_release,
+};
+
+/**
+ * papr_phy_attest_create_handle() - Create a fd-based handle for
+ * reading the response for the given attestation command.
+ * @ulc: Attestation command in user memory; defines the scope of
+ * data for the attestation command to retrieve.
+ *
+ * Handler for the PAPR_PHYSICAL_ATTESTATION_IOC_CREATE_HANDLE ioctl
+ * command. Validates @ulc and instantiates an immutable response
+ * "blob" for the attestation command. The blob is attached to a file
+ * descriptor for reading by user space. The memory backing the blob
+ * is freed when the file is released.
+ *
+ * The entire requested response buffer for the attestation command is
+ * retrieved by this call, and all necessary RTAS interactions are
+ * performed before returning the fd to user space. This keeps the
+ * read handler simple and ensures that the kernel can prevent
+ * interleaving of ibm,physical-attestation call sequences.
+ *
+ * Return: The installed fd number if successful, -ve errno otherwise.
+ */
+static long papr_phy_attest_create_handle(struct papr_phy_attest_io_block __user *ulc)
+{
+	struct rtas_phy_attest_params *params;
+	struct papr_rtas_sequence seq = {};
+	int fd;
+
+	/*
+	 * Freed in phy_attest_sequence_end().
+	 */
+	params = kzalloc(sizeof(*params), GFP_KERNEL_ACCOUNT);
+	if (!params)
+		return -ENOMEM;
+
+	if (copy_from_user(&params->cmd, ulc,
+			   sizeof(struct papr_phy_attest_io_block)))
+		return -EFAULT;
+
+	params->cmd_len = be32_to_cpu(params->cmd.length);
+	seq = (struct papr_rtas_sequence) {
+		.begin = phy_attest_sequence_begin,
+		.end = phy_attest_sequence_end,
+		.work = phy_attest_sequence_fill_work_area,
+	};
+
+	seq.params = (void *)params;
+
+	fd = papr_rtas_setup_file_interface(&seq,
+			&papr_phy_attest_handle_ops,
+			"[papr-physical-attestation]");
+
+	return fd;
+}
+
+/*
+ * Top-level ioctl handler for /dev/papr-physical-attestation.
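+ *
+ * Illustrative user-space flow (fill_attestation_cmd() is a hypothetical
+ * helper that prepares a platform-defined attestation command):
+ *
+ *	struct papr_phy_attest_io_block io;
+ *	int fd, hfd;
+ *
+ *	fd = open("/dev/papr-physical-attestation", O_RDWR);
+ *	fill_attestation_cmd(&io);
+ *	hfd = ioctl(fd, PAPR_PHY_ATTEST_IOC_HANDLE, &io);
+ *	// read() the response blob from hfd until EOF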
+ */
+static long papr_phy_attest_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+	void __user *argp = (__force void __user *)arg;
+	long ret;
+
+	switch (ioctl) {
+	case PAPR_PHY_ATTEST_IOC_HANDLE:
+		ret = papr_phy_attest_create_handle(argp);
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	return ret;
+}
+
+static const struct file_operations papr_phy_attest_ops = {
+	.unlocked_ioctl = papr_phy_attest_dev_ioctl,
+};
+
+static struct miscdevice papr_phy_attest_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "papr-physical-attestation",
+	.fops = &papr_phy_attest_ops,
+};
+
+static __init int papr_phy_attest_init(void)
+{
+	if (!rtas_function_implemented(RTAS_FN_IBM_PHYSICAL_ATTESTATION))
+		return -ENODEV;
+
+	return misc_register(&papr_phy_attest_dev);
+}
+machine_device_initcall(pseries, papr_phy_attest_init);
diff --git a/arch/powerpc/platforms/pseries/papr-platform-dump.c b/arch/powerpc/platforms/pseries/papr-platform-dump.c
new file mode 100644
index 000000000000..f8d55eccdb6b
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr-platform-dump.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "papr-platform-dump: " fmt
+
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <asm/machdep.h>
+#include <asm/rtas-work-area.h>
+#include <asm/rtas.h>
+#include <uapi/asm/papr-platform-dump.h>
+
+/*
+ * Function-specific return values for ibm,platform-dump, derived from
+ * PAPR+ v2.13 7.3.3.4.1 "ibm,platform-dump RTAS Call".
+ */
+#define RTAS_IBM_PLATFORM_DUMP_COMPLETE	0	/* Complete dump retrieved. */
+#define RTAS_IBM_PLATFORM_DUMP_CONTINUE	1	/* Continue dump */
+#define RTAS_NOT_AUTHORIZED		-9002	/* Not Authorized */
+
+#define RTAS_IBM_PLATFORM_DUMP_START	2	/* Linux status to start dump */
+
+/**
+ * struct ibm_platform_dump_params - Parameters (in and out) for
+ * ibm,platform-dump
+ * @work_area:		In: work area buffer for results.
+ * @buf_length:		In: work area buffer length in bytes
+ * @dump_tag_hi:	In: Most-significant 32 bits of a Dump_Tag representing
+ *			an id of the dump being processed.
+ * @dump_tag_lo:	In: Least-significant 32 bits of a Dump_Tag representing
+ *			an id of the dump being processed.
+ * @sequence_hi:	In: Sequence number in most-significant 32 bits.
+ *			Out: Next sequence number in most-significant 32 bits.
+ * @sequence_lo:	In: Sequence number in least-significant 32 bits.
+ *			Out: Next sequence number in least-significant 32 bits.
+ * @bytes_ret_hi:	Out: Bytes written in most-significant 32 bits.
+ * @bytes_ret_lo:	Out: Bytes written in least-significant 32 bits.
+ * @status:		Out: RTAS call status.
+ * @list:		Maintains the list of dumps in progress. Multiple
+ *			dumps with different dump IDs can be retrieved at the
+ *			same time, but not with the same dump ID. This list
+ *			is used to determine whether a dump for the same ID
+ *			is in progress.
+ */
+struct ibm_platform_dump_params {
+	struct rtas_work_area	*work_area;
+	u32			buf_length;
+	u32			dump_tag_hi;
+	u32			dump_tag_lo;
+	u32			sequence_hi;
+	u32			sequence_lo;
+	u32			bytes_ret_hi;
+	u32			bytes_ret_lo;
+	s32			status;
+	struct list_head	list;
+};
+
+/*
+ * Multiple dumps with different dump IDs can be retrieved at the same
+ * time, but not with the same dump ID. platform_dump_list_mutex and
+ * platform_dump_list are used to enforce this.
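+ *
+ * Requests are keyed by the 64-bit dump tag supplied by user space,
+ * which maps onto the split fields of the parameter struct as:
+ *
+ *	dump_tag_hi = (u32)(dump_tag >> 32);
+ *	dump_tag_lo = (u32)(dump_tag & 0xffffffff);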
+/*
+ * Multiple dumps with different dump IDs can be retrieved at the same
+ * time, but not with the same dump ID. platform_dump_list_mutex and
+ * platform_dump_list are used to prevent this behavior.
+ */
+static DEFINE_MUTEX(platform_dump_list_mutex);
+static LIST_HEAD(platform_dump_list);
+
+/**
+ * rtas_ibm_platform_dump() - Call ibm,platform-dump to fill a work area
+ * buffer.
+ * @params: See &struct ibm_platform_dump_params.
+ * @buf_addr: Address of the dump buffer (work_area)
+ * @buf_length: Length of the buffer in bytes (min. 1024)
+ *
+ * Calls ibm,platform-dump until it errors or successfully deposits data
+ * into the supplied work area. Handles RTAS retry statuses. Maps RTAS
+ * error statuses to reasonable errno values.
+ *
+ * Multiple dumps with different dump IDs can be requested at the same
+ * time, but not with the same dump ID; that is prevented by the check
+ * in the ioctl code (papr_platform_dump_create_handle()).
+ *
+ * The caller should inspect @params.status to determine whether more
+ * calls are needed to complete the sequence.
+ *
+ * Context: May sleep.
+ * Return: -ve errno on error, 0 otherwise. @params.status carries the
+ * ibm,platform-dump status (dump complete vs. continue dump).
+ */
+static int rtas_ibm_platform_dump(struct ibm_platform_dump_params *params,
+				  phys_addr_t buf_addr, u32 buf_length)
+{
+	u32 rets[4];
+	s32 fwrc;
+	int ret = 0;
+
+	do {
+		fwrc = rtas_call(rtas_function_token(RTAS_FN_IBM_PLATFORM_DUMP),
+				 6, 5,
+				 rets,
+				 params->dump_tag_hi,
+				 params->dump_tag_lo,
+				 params->sequence_hi,
+				 params->sequence_lo,
+				 buf_addr,
+				 buf_length);
+	} while (rtas_busy_delay(fwrc));
+
+	switch (fwrc) {
+	case RTAS_HARDWARE_ERROR:
+		ret = -EIO;
+		break;
+	case RTAS_NOT_AUTHORIZED:
+		ret = -EPERM;
+		break;
+	case RTAS_IBM_PLATFORM_DUMP_CONTINUE:
+	case RTAS_IBM_PLATFORM_DUMP_COMPLETE:
+		params->sequence_hi = rets[0];
+		params->sequence_lo = rets[1];
+		params->bytes_ret_hi = rets[2];
+		params->bytes_ret_lo = rets[3];
+		break;
+	default:
+		ret = -EIO;
+		pr_err_ratelimited("unexpected ibm,platform-dump status %d\n",
+				   fwrc);
+		break;
+	}
+
+	params->status = fwrc;
+	return ret;
+}
+
+/*
+ * A platform dump is retrieved with multiple RTAS calls for the
+ * provided dump ID. Once the complete dump is retrieved, the
+ * hypervisor returns dump complete status (0) for the last RTAS call
+ * and expects the caller to issue one more call with a NULL buffer to
+ * invalidate the dump, so that the hypervisor can remove it.
+ *
+ * After the specific dump is invalidated in the hypervisor, expect the
+ * dump complete status for the new sequence - user space initiates a
+ * new request for the same dump ID.
+ */
+static ssize_t papr_platform_dump_handle_read(struct file *file,
+		char __user *buf, size_t size, loff_t *off)
+{
+	struct ibm_platform_dump_params *params = file->private_data;
+	u64 total_bytes;
+	s32 fwrc;
+
+	/*
+	 * Dump already completed with the previous read calls. Return
+	 * -EINVAL if user space issues further reads.
+	 */
+	if (!params->buf_length) {
+		pr_warn_once("Platform dump completed for dump ID %llu\n",
+			     (u64)(((u64)params->dump_tag_hi << 32) |
+				   params->dump_tag_lo));
+		return -EINVAL;
+	}
+
+	/*
+	 * The hypervisor returns status 0 if no more data is available
+	 * to download. The dump will be invalidated with ioctl (see below).
+	 */
+	if (params->status == RTAS_IBM_PLATFORM_DUMP_COMPLETE) {
+		params->buf_length = 0;
+		/*
+		 * Return 0 so that the user space read loop stops.
+		 */
+		return 0;
+	}
+
+	if (size < SZ_1K) {
+		pr_err_once("Buffer length should be at least 1024 bytes\n");
+		return -EINVAL;
+	} else if (size > params->buf_length) {
+		/*
+		 * A 4K work area is allocated. If the user requests more
+		 * than that, clamp the read to the work area length.
+		 */
+		size = params->buf_length;
+	}
+
+	fwrc = rtas_ibm_platform_dump(params,
+				      rtas_work_area_phys(params->work_area),
+				      size);
+	if (fwrc < 0)
+		return fwrc;
+
+	total_bytes = (u64)(((u64)params->bytes_ret_hi << 32) |
+			    params->bytes_ret_lo);
+
+	/*
+	 * Kernel or firmware bug, do not continue.
+	 */
+	if (WARN(total_bytes > size, "possible write beyond end of work area"))
+		return -EFAULT;
+
+	if (copy_to_user(buf, rtas_work_area_raw_buf(params->work_area),
+			 total_bytes))
+		return -EFAULT;
+
+	return total_bytes;
+}
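Putting the pieces together, a hedged sketch of the expected user-space consumption (the ioctl names come from the UAPI papr-platform-dump header, whose install path is assumed here): read in chunks of at least 1 KiB until read() returns 0, then invalidate the dump on the same fd:

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	/* UAPI header with the ioctl definitions (assumed path). */
	#include <asm/papr-platform-dump.h>

	int save_platform_dump(uint64_t dump_tag, int outfd)
	{
		char buf[4096];	/* matches the kernel's 4K work area */
		ssize_t n;
		int devfd, fd;

		devfd = open("/dev/papr-platform-dump", O_RDONLY);
		if (devfd < 0)
			return -1;

		/* Takes a pointer to the 64-bit dump tag; returns a dump fd. */
		fd = ioctl(devfd, PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE, &dump_tag);
		close(devfd);
		if (fd < 0)
			return -1;

		/* Each read may issue another ibm,platform-dump call; 0 means done. */
		while ((n = read(fd, buf, sizeof(buf))) > 0) {
			if (write(outfd, buf, n) != n)
				goto err;
		}
		if (n < 0)
			goto err;

		/* Only invalidate once the complete dump has been saved. */
		if (ioctl(fd, PAPR_PLATFORM_DUMP_IOC_INVALIDATE, &dump_tag) < 0)
			goto err;

		close(fd);
		return 0;
	err:
		close(fd);
		return -1;
	}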
+static int papr_platform_dump_handle_release(struct inode *inode,
+					     struct file *file)
+{
+	struct ibm_platform_dump_params *params = file->private_data;
+
+	if (params->work_area)
+		rtas_work_area_free(params->work_area);
+
+	mutex_lock(&platform_dump_list_mutex);
+	list_del(&params->list);
+	mutex_unlock(&platform_dump_list_mutex);
+
+	kfree(params);
+	file->private_data = NULL;
+	return 0;
+}
+
+/*
+ * This ioctl invalidates the dump, assuming user space issues it after
+ * obtaining the complete dump.
+ * Issue the last RTAS call with a NULL buffer to invalidate the dump,
+ * which means the dump will be freed in the hypervisor.
+ */
+static long papr_platform_dump_invalidate_ioctl(struct file *file,
+						unsigned int ioctl, unsigned long arg)
+{
+	struct ibm_platform_dump_params *params;
+	u64 __user *argp = (void __user *)arg;
+	u64 param_dump_tag, dump_tag;
+
+	if (ioctl != PAPR_PLATFORM_DUMP_IOC_INVALIDATE)
+		return -ENOIOCTLCMD;
+
+	if (get_user(dump_tag, argp))
+		return -EFAULT;
+
+	/*
+	 * private_data is freed during release(), so this should not
+	 * happen.
+	 */
+	if (!file->private_data) {
+		pr_err("No valid FD to invalidate dump for the ID(%llu)\n",
+		       dump_tag);
+		return -EINVAL;
+	}
+
+	params = file->private_data;
+	param_dump_tag = (u64)(((u64)params->dump_tag_hi << 32) |
+			       params->dump_tag_lo);
+	if (dump_tag != param_dump_tag) {
+		pr_err("Invalid dump ID(%llu) to invalidate dump\n",
+		       dump_tag);
+		return -EINVAL;
+	}
+
+	if (params->status != RTAS_IBM_PLATFORM_DUMP_COMPLETE) {
+		pr_err("Platform dump is not complete, but requested "
+		       "to invalidate dump for ID(%llu)\n",
+		       dump_tag);
+		return -EINPROGRESS;
+	}
+
+	return rtas_ibm_platform_dump(params, 0, 0);
+}
+
+static const struct file_operations papr_platform_dump_handle_ops = {
+	.read = papr_platform_dump_handle_read,
+	.release = papr_platform_dump_handle_release,
+	.unlocked_ioctl = papr_platform_dump_invalidate_ioctl,
+};
+
+/**
+ * papr_platform_dump_create_handle() - Create a fd-based handle for
+ * reading the platform dump
+ * @dump_tag: Dump ID for the dump requested to retrieve from the
+ * hypervisor
+ *
+ * Handler for the PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE ioctl command.
+ * Allocates the RTAS parameter struct and the work area and attaches
+ * them to the file descriptor, for reading by user space with multiple
+ * RTAS calls until the dump is completed. This memory allocation is
+ * freed when the file is released.
+ *
+ * Multiple dump requests with different IDs are allowed at the same
+ * time, but not with the same dump ID. So if user space has already
+ * opened a file descriptor for the specific dump ID, return -EALREADY
+ * for the next request.
+ *
+ * Return: The installed fd number if successful, -ve errno otherwise.
+ */
+static long papr_platform_dump_create_handle(u64 dump_tag)
+{
+	struct ibm_platform_dump_params *params;
+	u64 param_dump_tag;
+	struct file *file;
+	long err;
+	int fd;
+
+	/*
+	 * Return failure if user space has already opened an FD for
+	 * the specific dump ID. This check prevents multiple dump
+	 * requests for the same dump ID at the same time. Generally
+	 * this is not expected, but just in case.
+	 */
+	list_for_each_entry(params, &platform_dump_list, list) {
+		param_dump_tag = (u64)(((u64)params->dump_tag_hi << 32) |
+				       params->dump_tag_lo);
+		if (dump_tag == param_dump_tag) {
+			pr_err("Platform dump for ID(%llu) is already in progress\n",
+			       dump_tag);
+			return -EALREADY;
+		}
+	}
+
+	params = kzalloc(sizeof(struct ibm_platform_dump_params),
+			 GFP_KERNEL_ACCOUNT);
+	if (!params)
+		return -ENOMEM;
+
+	params->work_area = rtas_work_area_alloc(SZ_4K);
+	params->buf_length = SZ_4K;
+	params->dump_tag_hi = (u32)(dump_tag >> 32);
+	params->dump_tag_lo = (u32)(dump_tag & 0x00000000ffffffffULL);
+	params->status = RTAS_IBM_PLATFORM_DUMP_START;
+
+	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+	if (fd < 0) {
+		err = fd;
+		goto free_area;
+	}
+
+	file = anon_inode_getfile_fmode("[papr-platform-dump]",
+					&papr_platform_dump_handle_ops,
+					(void *)params, O_RDONLY,
+					FMODE_LSEEK | FMODE_PREAD);
+	if (IS_ERR(file)) {
+		err = PTR_ERR(file);
+		goto put_fd;
+	}
+
+	fd_install(fd, file);
+
+	list_add(&params->list, &platform_dump_list);
+
+	pr_info("%s (%d) initiated platform dump for dump tag %llu\n",
+		current->comm, current->pid, dump_tag);
+	return fd;
+put_fd:
+	put_unused_fd(fd);
+free_area:
+	rtas_work_area_free(params->work_area);
+	kfree(params);
+	return err;
+}
+
+/*
+ * Top-level ioctl handler for /dev/papr-platform-dump.
+ */
+static long papr_platform_dump_dev_ioctl(struct file *filp,
+					 unsigned int ioctl,
+					 unsigned long arg)
+{
+	u64 __user *argp = (void __user *)arg;
+	u64 dump_tag;
+	long ret;
+
+	if (get_user(dump_tag, argp))
+		return -EFAULT;
+
+	switch (ioctl) {
+	case PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE:
+		mutex_lock(&platform_dump_list_mutex);
+		ret = papr_platform_dump_create_handle(dump_tag);
+		mutex_unlock(&platform_dump_list_mutex);
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	return ret;
+}
+
+static const struct file_operations papr_platform_dump_ops = {
+	.unlocked_ioctl = papr_platform_dump_dev_ioctl,
+};
+
+static struct miscdevice papr_platform_dump_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "papr-platform-dump",
+	.fops = &papr_platform_dump_ops,
+};
+
+static __init int papr_platform_dump_init(void)
+{
+	if (!rtas_function_implemented(RTAS_FN_IBM_PLATFORM_DUMP))
+		return -ENODEV;
+
+	return misc_register(&papr_platform_dump_dev);
+}
+machine_device_initcall(pseries, papr_platform_dump_init);
diff --git a/arch/powerpc/platforms/pseries/papr-rtas-common.c b/arch/powerpc/platforms/pseries/papr-rtas-common.c
new file mode 100644
index 000000000000..33c606e3378a
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr-rtas-common.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "papr-common: " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/sched/signal.h>
+#include "papr-rtas-common.h"
+
+/*
+ * A sequence-based RTAS call has to be issued multiple times to
+ * retrieve the complete data from the hypervisor. For some of these
+ * RTAS calls, the OS should not interleave calls with different input
+ * until the sequence is completed. So the data is collected during the
+ * ioctl handler and exported to user space via the read() handler.
+ * This file provides common functions needed for such sequence-based
+ * RTAS calls, e.g. ibm,get-vpd and ibm,get-indices.
+ */
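The begin/end/work contract this file expects from its callers can be summarized with a hypothetical sequence implementation; every foo_* name below is illustrative only, not part of the patch:

	/* Hypothetical parameter block for a sequence-based RTAS call. */
	struct foo_params {
		struct rtas_work_area *work_area;
		u32 sequence;
		u32 written;
		s32 status;
	};

	/* Stand-in for the call-specific RTAS wrapper, e.g. rtas_ibm_get_vpd(). */
	static int foo_rtas_call(struct foo_params *p);

	/* Acquire resources and set the initial sequence number. */
	static void foo_sequence_begin(struct papr_rtas_sequence *seq)
	{
		struct foo_params *p = seq->params;

		p->work_area = rtas_work_area_alloc(SZ_4K);
		p->sequence = 1;
		p->status = 0;
	}

	/* Release whatever foo_sequence_begin() acquired. */
	static void foo_sequence_end(struct papr_rtas_sequence *seq)
	{
		struct foo_params *p = seq->params;

		rtas_work_area_free(p->work_area);
	}

	/* Return the next chunk for the blob, or NULL when the sequence is done. */
	static const char *foo_sequence_fill_work_area(struct papr_rtas_sequence *seq,
						       size_t *len)
	{
		struct foo_params *p = seq->params;

		if (papr_rtas_sequence_should_stop(seq, p->status, p->written == 0))
			return NULL;
		if (papr_rtas_sequence_set_err(seq, foo_rtas_call(p)))
			return NULL;
		*len = p->written;
		return rtas_work_area_raw_buf(p->work_area);
	}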
+bool papr_rtas_blob_has_data(const struct papr_rtas_blob *blob)
+{
+	return blob->data && blob->len;
+}
+
+void papr_rtas_blob_free(const struct papr_rtas_blob *blob)
+{
+	if (blob) {
+		kvfree(blob->data);
+		kfree(blob);
+	}
+}
+
+/**
+ * papr_rtas_blob_extend() - Append data to a &struct papr_rtas_blob.
+ * @blob: The blob to extend.
+ * @data: The new data to append to @blob.
+ * @len: The length of @data.
+ *
+ * Context: May sleep.
+ * Return: -ENOMEM on allocation failure, 0 otherwise.
+ */
+static int papr_rtas_blob_extend(struct papr_rtas_blob *blob,
+				 const char *data, size_t len)
+{
+	const size_t new_len = blob->len + len;
+	const size_t old_len = blob->len;
+	const char *old_ptr = blob->data;
+	char *new_ptr;
+
+	new_ptr = kvrealloc(old_ptr, new_len, GFP_KERNEL_ACCOUNT);
+	if (!new_ptr)
+		return -ENOMEM;
+
+	memcpy(&new_ptr[old_len], data, len);
+	blob->data = new_ptr;
+	blob->len = new_len;
+	return 0;
+}
+
+/**
+ * papr_rtas_blob_generate() - Construct a new &struct papr_rtas_blob.
+ * @seq: Sequence state; its @work callback supplies the data.
+ *
+ * The @work callback is invoked until it returns NULL. @seq is
+ * passed to @work in its first argument on each call. When
+ * @work returns data, it should store the data length in its
+ * second argument.
+ *
+ * Context: May sleep.
+ * Return: A completely populated &struct papr_rtas_blob, or NULL on error.
+ */
+static const struct papr_rtas_blob *
+papr_rtas_blob_generate(struct papr_rtas_sequence *seq)
+{
+	struct papr_rtas_blob *blob;
+	const char *buf;
+	size_t len;
+	int err = 0;
+
+	/* Check for the callback before allocating, so nothing can leak. */
+	if (!seq->work)
+		return ERR_PTR(-EINVAL);
+
+	blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT);
+	if (!blob)
+		return NULL;
+
+	while (err == 0 && (buf = seq->work(seq, &len)))
+		err = papr_rtas_blob_extend(blob, buf, len);
+
+	if (err != 0 || !papr_rtas_blob_has_data(blob))
+		goto free_blob;
+
+	return blob;
+free_blob:
+	papr_rtas_blob_free(blob);
+	return NULL;
+}
+
+int papr_rtas_sequence_set_err(struct papr_rtas_sequence *seq, int err)
+{
+	/* Preserve the first error recorded. */
+	if (seq->error == 0)
+		seq->error = err;
+
+	return seq->error;
+}
+
+/*
+ * Higher-level retrieval code below. These functions use the
+ * papr_rtas_blob_* and sequence_* APIs defined above to create fd-based
+ * handles for consumption by user space.
+ */
+
+/**
+ * papr_rtas_run_sequence() - Run a single retrieval sequence.
+ * @seq: Functions of the caller to complete the sequence.
+ *
+ * Context: May sleep. Holds a mutex and an RTAS work area for its
+ *          duration. Typically performs multiple sleepable slab
+ *          allocations.
+ *
+ * Return: A populated &struct papr_rtas_blob on success. Encoded error
+ *         pointer otherwise.
+ */
+static const struct papr_rtas_blob *papr_rtas_run_sequence(struct papr_rtas_sequence *seq)
+{
+	const struct papr_rtas_blob *blob;
+
+	if (seq->begin)
+		seq->begin(seq);
+
+	blob = papr_rtas_blob_generate(seq);
+	if (!blob)
+		papr_rtas_sequence_set_err(seq, -ENOMEM);
+
+	if (seq->end)
+		seq->end(seq);
+
+	if (seq->error) {
+		papr_rtas_blob_free(blob);
+		return ERR_PTR(seq->error);
+	}
+
+	return blob;
+}
+
+/**
+ * papr_rtas_retrieve() - Return the data blob that is exposed to
+ * user space.
+ * @seq: RTAS call-specific functions to be invoked until the
+ * sequence is completed.
+ *
+ * Run sequences against @seq until a blob is successfully
+ * instantiated, or a hard error is encountered, or a fatal signal is
+ * pending.
+ *
+ * Context: May sleep.
+ * Return: A fully populated data blob when successful. Encoded error
+ *         pointer otherwise.
+ */
+const struct papr_rtas_blob *papr_rtas_retrieve(struct papr_rtas_sequence *seq)
+{
+	const struct papr_rtas_blob *blob;
+
+	/*
+	 * EAGAIN means the sequence ended with a -4 (data changed;
+	 * restart the call sequence) status from the RTAS call, and we
+	 * should attempt a new sequence. PAPR+ (v2.13 R1–7.3.20–5 -
+	 * ibm,get-vpd, R1–7.3.17–6 - ibm,get-indices) indicates that
+	 * this should be a transient condition, not something that
+	 * happens continuously. But we'll stop trying on a fatal signal.
+	 */
+	do {
+		blob = papr_rtas_run_sequence(seq);
+		if (!IS_ERR(blob)) /* Success. */
+			break;
+		if (PTR_ERR(blob) != -EAGAIN) /* Hard error. */
+			break;
+		cond_resched();
+	} while (!fatal_signal_pending(current));
+
+	return blob;
+}
+
+/**
+ * papr_rtas_setup_file_interface() - Complete the sequence and expose
+ * the data to user space through a fd-based handle. User space then
+ * retrieves the data with the read() handler.
+ * @seq: RTAS call-specific functions to get the data.
+ * @fops: RTAS call-specific file operations such as read().
+ * @name: RTAS call-specific name for the anonymous inode.
+ *
+ * Return: The installed fd number if successful, -ve errno otherwise.
+ */
+long papr_rtas_setup_file_interface(struct papr_rtas_sequence *seq,
+				    const struct file_operations *fops,
+				    char *name)
+{
+	const struct papr_rtas_blob *blob;
+	struct file *file;
+	long ret;
+	int fd;
+
+	blob = papr_rtas_retrieve(seq);
+	if (IS_ERR(blob))
+		return PTR_ERR(blob);
+
+	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+	if (fd < 0) {
+		ret = fd;
+		goto free_blob;
+	}
+
+	file = anon_inode_getfile_fmode(name, fops, (void *)blob,
+					O_RDONLY, FMODE_LSEEK | FMODE_PREAD);
+	if (IS_ERR(file)) {
+		ret = PTR_ERR(file);
+		goto put_fd;
+	}
+
+	fd_install(fd, file);
+	return fd;
+
+put_fd:
+	put_unused_fd(fd);
+free_blob:
+	papr_rtas_blob_free(blob);
+	return ret;
+}
+
+/*
+ * papr_rtas_sequence_should_stop() - Determine whether the RTAS
+ * retrieval sequence should stop.
+ *
+ * Examines the sequence error state and the outputs of the last call
+ * to the specific RTAS function to determine whether the sequence in
+ * progress should continue or stop.
+ *
+ * Return: True if the sequence has encountered an error or if all data
+ *         for this sequence has been retrieved. False otherwise.
+ */
+bool papr_rtas_sequence_should_stop(const struct papr_rtas_sequence *seq,
+				    s32 status, bool init_state)
+{
+	bool done;
+
+	if (seq->error)
+		return true;
+
+	switch (status) {
+	case RTAS_SEQ_COMPLETE:
+		if (init_state)
+			done = false; /* Initial state. */
+		else
+			done = true; /* All data consumed. */
+		break;
+	case RTAS_SEQ_MORE_DATA:
+		done = false; /* More data available. */
+		break;
+	default:
+		done = true; /* Error encountered. */
+		break;
+	}
+
+	return done;
+}
+
+/*
+ * User space read to retrieve data for the corresponding RTAS call.
+ * papr_rtas_blob is filled with the data using the corresponding RTAS
+ * call sequence API.
+ */
+ssize_t papr_rtas_common_handle_read(struct file *file,
+				     char __user *buf, size_t size, loff_t *off)
+{
+	const struct papr_rtas_blob *blob = file->private_data;
+
+	/* We should not instantiate a handle without any data attached. */
+	if (!papr_rtas_blob_has_data(blob)) {
+		pr_err_once("handle without data\n");
+		return -EIO;
+	}
+
+	return simple_read_from_buffer(buf, size, off, blob->data, blob->len);
+}
+
+int papr_rtas_common_handle_release(struct inode *inode,
+				    struct file *file)
+{
+	const struct papr_rtas_blob *blob = file->private_data;
+
+	papr_rtas_blob_free(blob);
+
+	return 0;
+}
+
+loff_t papr_rtas_common_handle_seek(struct file *file, loff_t off,
+				    int whence)
+{
+	const struct papr_rtas_blob *blob = file->private_data;
+
+	return fixed_size_llseek(file, off, whence, blob->len);
+}
diff --git a/arch/powerpc/platforms/pseries/papr-rtas-common.h b/arch/powerpc/platforms/pseries/papr-rtas-common.h
new file mode 100644
index 000000000000..4ceabcaf4905
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr-rtas-common.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_POWERPC_PAPR_RTAS_COMMON_H
+#define _ASM_POWERPC_PAPR_RTAS_COMMON_H
+
+#include <linux/types.h>
+
+/*
+ * Return codes for sequence-based RTAS calls. These are not listed
+ * under PAPR+ v2.13 7.2.8 "Return Codes" but are defined in the
+ * specific section of each such RTAS call.
+ */
+#define RTAS_SEQ_COMPLETE	0	/* All data has been retrieved. */
+#define RTAS_SEQ_MORE_DATA	1	/* More data is available */
+#define RTAS_SEQ_START_OVER	-4	/* Data changed, restart call sequence. */
+
+/*
+ * Internal "blob" APIs for accumulating RTAS call results into
+ * an immutable buffer to be attached to a file descriptor.
+ */
+struct papr_rtas_blob {
+	const char *data;
+	size_t len;
+};
+
+/**
+ * struct papr_rtas_sequence - State for managing a sequence of RTAS calls.
+ * @error: Shall be zero as long as the sequence has not encountered an error,
+ *         -ve errno otherwise. Use papr_rtas_sequence_set_err() to update.
+ * @params: Parameter block to pass to rtas_*() calls.
+ * @begin: Allocate the work area and initialize the parameter values
+ *         passed to the RTAS call.
+ * @end: Free the allocated work area.
+ * @work: Obtain data via the RTAS call; invoked repeatedly until the
+ * sequence is completed.
+ * + */ +struct papr_rtas_sequence { + int error; + void *params; + void (*begin)(struct papr_rtas_sequence *seq); + void (*end)(struct papr_rtas_sequence *seq); + const char *(*work)(struct papr_rtas_sequence *seq, size_t *len); +}; + +extern bool papr_rtas_blob_has_data(const struct papr_rtas_blob *blob); +extern void papr_rtas_blob_free(const struct papr_rtas_blob *blob); +extern int papr_rtas_sequence_set_err(struct papr_rtas_sequence *seq, + int err); +extern const struct papr_rtas_blob *papr_rtas_retrieve(struct papr_rtas_sequence *seq); +extern long papr_rtas_setup_file_interface(struct papr_rtas_sequence *seq, + const struct file_operations *fops, char *name); +extern bool papr_rtas_sequence_should_stop(const struct papr_rtas_sequence *seq, + s32 status, bool init_state); +extern ssize_t papr_rtas_common_handle_read(struct file *file, + char __user *buf, size_t size, loff_t *off); +extern int papr_rtas_common_handle_release(struct inode *inode, + struct file *file); +extern loff_t papr_rtas_common_handle_seek(struct file *file, loff_t off, + int whence); +#endif /* _ASM_POWERPC_PAPR_RTAS_COMMON_H */ + diff --git a/arch/powerpc/platforms/pseries/papr-vpd.c b/arch/powerpc/platforms/pseries/papr-vpd.c index c29e85db5f35..f38c188fc4a1 100644 --- a/arch/powerpc/platforms/pseries/papr-vpd.c +++ b/arch/powerpc/platforms/pseries/papr-vpd.c @@ -2,7 +2,6 @@ #define pr_fmt(fmt) "papr-vpd: " fmt -#include <linux/anon_inodes.h> #include <linux/build_bug.h> #include <linux/file.h> #include <linux/fs.h> @@ -20,14 +19,7 @@ #include <asm/rtas-work-area.h> #include <asm/rtas.h> #include <uapi/asm/papr-vpd.h> - -/* - * Function-specific return values for ibm,get-vpd, derived from PAPR+ - * v2.13 7.3.20 "ibm,get-vpd RTAS Call". - */ -#define RTAS_IBM_GET_VPD_COMPLETE 0 /* All VPD has been retrieved. */ -#define RTAS_IBM_GET_VPD_MORE_DATA 1 /* More VPD is available. */ -#define RTAS_IBM_GET_VPD_START_OVER -4 /* VPD changed, restart call sequence. */ +#include "papr-rtas-common.h" /** * struct rtas_ibm_get_vpd_params - Parameters (in and out) for ibm,get-vpd. @@ -91,13 +83,14 @@ static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params) case RTAS_INVALID_PARAMETER: ret = -EINVAL; break; - case RTAS_IBM_GET_VPD_START_OVER: + case RTAS_SEQ_START_OVER: ret = -EAGAIN; + pr_info_ratelimited("VPD changed during retrieval, retrying\n"); break; - case RTAS_IBM_GET_VPD_MORE_DATA: + case RTAS_SEQ_MORE_DATA: params->sequence = rets[0]; fallthrough; - case RTAS_IBM_GET_VPD_COMPLETE: + case RTAS_SEQ_COMPLETE: params->written = rets[1]; /* * Kernel or firmware bug, do not continue. @@ -119,94 +112,6 @@ static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params) } /* - * Internal VPD "blob" APIs for accumulating ibm,get-vpd results into - * an immutable buffer to be attached to a file descriptor. - */ -struct vpd_blob { - const char *data; - size_t len; -}; - -static bool vpd_blob_has_data(const struct vpd_blob *blob) -{ - return blob->data && blob->len; -} - -static void vpd_blob_free(const struct vpd_blob *blob) -{ - if (blob) { - kvfree(blob->data); - kfree(blob); - } -} - -/** - * vpd_blob_extend() - Append data to a &struct vpd_blob. - * @blob: The blob to extend. - * @data: The new data to append to @blob. - * @len: The length of @data. - * - * Context: May sleep. - * Return: -ENOMEM on allocation failure, 0 otherwise. 
- */ -static int vpd_blob_extend(struct vpd_blob *blob, const char *data, size_t len) -{ - const size_t new_len = blob->len + len; - const size_t old_len = blob->len; - const char *old_ptr = blob->data; - char *new_ptr; - - new_ptr = old_ptr ? - kvrealloc(old_ptr, old_len, new_len, GFP_KERNEL_ACCOUNT) : - kvmalloc(len, GFP_KERNEL_ACCOUNT); - - if (!new_ptr) - return -ENOMEM; - - memcpy(&new_ptr[old_len], data, len); - blob->data = new_ptr; - blob->len = new_len; - return 0; -} - -/** - * vpd_blob_generate() - Construct a new &struct vpd_blob. - * @generator: Function that supplies the blob data. - * @arg: Context pointer supplied by caller, passed to @generator. - * - * The @generator callback is invoked until it returns NULL. @arg is - * passed to @generator in its first argument on each call. When - * @generator returns data, it should store the data length in its - * second argument. - * - * Context: May sleep. - * Return: A completely populated &struct vpd_blob, or NULL on error. - */ -static const struct vpd_blob * -vpd_blob_generate(const char * (*generator)(void *, size_t *), void *arg) -{ - struct vpd_blob *blob; - const char *buf; - size_t len; - int err = 0; - - blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT); - if (!blob) - return NULL; - - while (err == 0 && (buf = generator(arg, &len))) - err = vpd_blob_extend(blob, buf, len); - - if (err != 0 || !vpd_blob_has_data(blob)) - goto free_blob; - - return blob; -free_blob: - vpd_blob_free(blob); - return NULL; -} - -/* * Internal VPD sequence APIs. A VPD sequence is a series of calls to * ibm,get-vpd for a given location code. The sequence ends when an * error is encountered or all VPD for the location code has been @@ -214,30 +119,14 @@ free_blob: */ /** - * struct vpd_sequence - State for managing a VPD sequence. - * @error: Shall be zero as long as the sequence has not encountered an error, - * -ve errno otherwise. Use vpd_sequence_set_err() to update this. - * @params: Parameter block to pass to rtas_ibm_get_vpd(). - */ -struct vpd_sequence { - int error; - struct rtas_ibm_get_vpd_params params; -}; - -/** * vpd_sequence_begin() - Begin a VPD retrieval sequence. - * @seq: Uninitialized sequence state. - * @loc_code: Location code that defines the scope of the VPD to return. - * - * Initializes @seq with the resources necessary to carry out a VPD - * sequence. Callers must pass @seq to vpd_sequence_end() regardless - * of whether the sequence succeeds. + * @seq: vpd call parameters from sequence struct * * Context: May sleep. */ -static void vpd_sequence_begin(struct vpd_sequence *seq, - const struct papr_location_code *loc_code) +static void vpd_sequence_begin(struct papr_rtas_sequence *seq) { + struct rtas_ibm_get_vpd_params *vpd_params; /* * Use a static data structure for the location code passed to * RTAS to ensure it's in the RMA and avoid a separate work @@ -245,6 +134,7 @@ static void vpd_sequence_begin(struct vpd_sequence *seq, */ static struct papr_location_code static_loc_code; + vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params; /* * We could allocate the work area before acquiring the * function lock, but that would allow concurrent requests to @@ -252,14 +142,12 @@ static void vpd_sequence_begin(struct vpd_sequence *seq, * allocate the work area under the lock. 
*/ mutex_lock(&rtas_ibm_get_vpd_lock); - static_loc_code = *loc_code; - *seq = (struct vpd_sequence) { - .params = { - .work_area = rtas_work_area_alloc(SZ_4K), - .loc_code = &static_loc_code, - .sequence = 1, - }, - }; + static_loc_code = *(struct papr_location_code *)vpd_params->loc_code; + vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params; + vpd_params->work_area = rtas_work_area_alloc(SZ_4K); + vpd_params->loc_code = &static_loc_code; + vpd_params->sequence = 1; + vpd_params->status = 0; } /** @@ -268,180 +156,39 @@ static void vpd_sequence_begin(struct vpd_sequence *seq, * * Releases resources obtained by vpd_sequence_begin(). */ -static void vpd_sequence_end(struct vpd_sequence *seq) -{ - rtas_work_area_free(seq->params.work_area); - mutex_unlock(&rtas_ibm_get_vpd_lock); -} - -/** - * vpd_sequence_should_stop() - Determine whether a VPD retrieval sequence - * should continue. - * @seq: VPD sequence state. - * - * Examines the sequence error state and outputs of the last call to - * ibm,get-vpd to determine whether the sequence in progress should - * continue or stop. - * - * Return: True if the sequence has encountered an error or if all VPD for - * this sequence has been retrieved. False otherwise. - */ -static bool vpd_sequence_should_stop(const struct vpd_sequence *seq) +static void vpd_sequence_end(struct papr_rtas_sequence *seq) { - bool done; + struct rtas_ibm_get_vpd_params *vpd_params; - if (seq->error) - return true; - - switch (seq->params.status) { - case 0: - if (seq->params.written == 0) - done = false; /* Initial state. */ - else - done = true; /* All data consumed. */ - break; - case 1: - done = false; /* More data available. */ - break; - default: - done = true; /* Error encountered. */ - break; - } - - return done; -} - -static int vpd_sequence_set_err(struct vpd_sequence *seq, int err) -{ - /* Preserve the first error recorded. */ - if (seq->error == 0) - seq->error = err; - - return seq->error; + vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params; + rtas_work_area_free(vpd_params->work_area); + mutex_unlock(&rtas_ibm_get_vpd_lock); } /* - * Generator function to be passed to vpd_blob_generate(). + * Generator function to be passed to papr_rtas_blob_generate(). */ -static const char *vpd_sequence_fill_work_area(void *arg, size_t *len) +static const char *vpd_sequence_fill_work_area(struct papr_rtas_sequence *seq, + size_t *len) { - struct vpd_sequence *seq = arg; - struct rtas_ibm_get_vpd_params *p = &seq->params; + struct rtas_ibm_get_vpd_params *p; + bool init_state; + + p = (struct rtas_ibm_get_vpd_params *)seq->params; + init_state = (p->written == 0) ? true : false; - if (vpd_sequence_should_stop(seq)) + if (papr_rtas_sequence_should_stop(seq, p->status, init_state)) return NULL; - if (vpd_sequence_set_err(seq, rtas_ibm_get_vpd(p))) + if (papr_rtas_sequence_set_err(seq, rtas_ibm_get_vpd(p))) return NULL; *len = p->written; return rtas_work_area_raw_buf(p->work_area); } -/* - * Higher-level VPD retrieval code below. These functions use the - * vpd_blob_* and vpd_sequence_* APIs defined above to create fd-based - * VPD handles for consumption by user space. - */ - -/** - * papr_vpd_run_sequence() - Run a single VPD retrieval sequence. - * @loc_code: Location code that defines the scope of VPD to return. - * - * Context: May sleep. Holds a mutex and an RTAS work area for its - * duration. Typically performs multiple sleepable slab - * allocations. - * - * Return: A populated &struct vpd_blob on success. Encoded error - * pointer otherwise. 
- */ -static const struct vpd_blob *papr_vpd_run_sequence(const struct papr_location_code *loc_code) -{ - const struct vpd_blob *blob; - struct vpd_sequence seq; - - vpd_sequence_begin(&seq, loc_code); - blob = vpd_blob_generate(vpd_sequence_fill_work_area, &seq); - if (!blob) - vpd_sequence_set_err(&seq, -ENOMEM); - vpd_sequence_end(&seq); - - if (seq.error) { - vpd_blob_free(blob); - return ERR_PTR(seq.error); - } - - return blob; -} - -/** - * papr_vpd_retrieve() - Return the VPD for a location code. - * @loc_code: Location code that defines the scope of VPD to return. - * - * Run VPD sequences against @loc_code until a blob is successfully - * instantiated, or a hard error is encountered, or a fatal signal is - * pending. - * - * Context: May sleep. - * Return: A fully populated VPD blob when successful. Encoded error - * pointer otherwise. - */ -static const struct vpd_blob *papr_vpd_retrieve(const struct papr_location_code *loc_code) -{ - const struct vpd_blob *blob; - - /* - * EAGAIN means the sequence errored with a -4 (VPD changed) - * status from ibm,get-vpd, and we should attempt a new - * sequence. PAPR+ v2.13 R1–7.3.20–5 indicates that this - * should be a transient condition, not something that happens - * continuously. But we'll stop trying on a fatal signal. - */ - do { - blob = papr_vpd_run_sequence(loc_code); - if (!IS_ERR(blob)) /* Success. */ - break; - if (PTR_ERR(blob) != -EAGAIN) /* Hard error. */ - break; - pr_info_ratelimited("VPD changed during retrieval, retrying\n"); - cond_resched(); - } while (!fatal_signal_pending(current)); - - return blob; -} - -static ssize_t papr_vpd_handle_read(struct file *file, char __user *buf, size_t size, loff_t *off) -{ - const struct vpd_blob *blob = file->private_data; - - /* bug: we should not instantiate a handle without any data attached. 
*/ - if (!vpd_blob_has_data(blob)) { - pr_err_once("handle without data\n"); - return -EIO; - } - - return simple_read_from_buffer(buf, size, off, blob->data, blob->len); -} - -static int papr_vpd_handle_release(struct inode *inode, struct file *file) -{ - const struct vpd_blob *blob = file->private_data; - - vpd_blob_free(blob); - - return 0; -} - -static loff_t papr_vpd_handle_seek(struct file *file, loff_t off, int whence) -{ - const struct vpd_blob *blob = file->private_data; - - return fixed_size_llseek(file, off, whence, blob->len); -} - - static const struct file_operations papr_vpd_handle_ops = { - .read = papr_vpd_handle_read, - .llseek = papr_vpd_handle_seek, - .release = papr_vpd_handle_release, + .read = papr_rtas_common_handle_read, + .llseek = papr_rtas_common_handle_seek, + .release = papr_rtas_common_handle_release, }; /** @@ -463,10 +210,9 @@ static const struct file_operations papr_vpd_handle_ops = { */ static long papr_vpd_create_handle(struct papr_location_code __user *ulc) { + struct rtas_ibm_get_vpd_params vpd_params = {}; + struct papr_rtas_sequence seq = {}; struct papr_location_code klc; - const struct vpd_blob *blob; - struct file *file; - long err; int fd; if (copy_from_user(&klc, ulc, sizeof(klc))) @@ -475,31 +221,19 @@ static long papr_vpd_create_handle(struct papr_location_code __user *ulc) if (!string_is_terminated(klc.str, ARRAY_SIZE(klc.str))) return -EINVAL; - blob = papr_vpd_retrieve(&klc); - if (IS_ERR(blob)) - return PTR_ERR(blob); + seq = (struct papr_rtas_sequence) { + .begin = vpd_sequence_begin, + .end = vpd_sequence_end, + .work = vpd_sequence_fill_work_area, + }; - fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); - if (fd < 0) { - err = fd; - goto free_blob; - } + vpd_params.loc_code = &klc; + seq.params = (void *)&vpd_params; - file = anon_inode_getfile("[papr-vpd]", &papr_vpd_handle_ops, - (void *)blob, O_RDONLY); - if (IS_ERR(file)) { - err = PTR_ERR(file); - goto put_fd; - } + fd = papr_rtas_setup_file_interface(&seq, &papr_vpd_handle_ops, + "[papr-vpd]"); - file->f_mode |= FMODE_LSEEK | FMODE_PREAD; - fd_install(fd, file); return fd; -put_fd: - put_unused_fd(fd); -free_blob: - vpd_blob_free(blob); - return err; } /* diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index c233f9db039b..f7c9271bda58 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/ioport.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <linux/ndctl.h> #include <linux/sched.h> @@ -16,9 +17,10 @@ #include <linux/nd.h> #include <asm/plpar_wrappers.h> -#include <asm/papr_pdsm.h> +#include <uapi/linux/papr_pdsm.h> +#include <linux/papr_scm.h> #include <asm/mce.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/perf_event.h> #define BIND_ANY_ADDR (~0ul) @@ -29,46 +31,6 @@ (1ul << ND_CMD_SET_CONFIG_DATA) | \ (1ul << ND_CMD_CALL)) -/* DIMM health bitmap indicators */ -/* SCM device is unable to persist memory contents */ -#define PAPR_PMEM_UNARMED (1ULL << (63 - 0)) -/* SCM device failed to persist memory contents */ -#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1)) -/* SCM device contents are persisted from previous IPL */ -#define PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << (63 - 2)) -/* SCM device contents are not persisted from previous IPL */ -#define PAPR_PMEM_EMPTY (1ULL << (63 - 3)) -/* SCM device memory life remaining is critically low */ -#define 
PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4)) -/* SCM device will be garded off next IPL due to failure */ -#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5)) -/* SCM contents cannot persist due to current platform health status */ -#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6)) -/* SCM device is unable to persist memory contents in certain conditions */ -#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7)) -/* SCM device is encrypted */ -#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8)) -/* SCM device has been scrubbed and locked */ -#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9)) - -/* Bits status indicators for health bitmap indicating unarmed dimm */ -#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \ - PAPR_PMEM_HEALTH_UNHEALTHY) - -/* Bits status indicators for health bitmap indicating unflushed dimm */ -#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY) - -/* Bits status indicators for health bitmap indicating unrestored dimm */ -#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY) - -/* Bit status indicators for smart event notification */ -#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \ - PAPR_PMEM_HEALTH_FATAL | \ - PAPR_PMEM_HEALTH_UNHEALTHY) - -#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS) -#define PAPR_SCM_PERF_STATS_VERSION 0x1 - /* Struct holding a single performance metric */ struct papr_scm_perf_stat { u8 stat_id[8]; @@ -582,7 +544,7 @@ static int drc_pmem_query_health(struct papr_scm_priv *p) /* Jiffies offset for which the health data is assumed to be same */ cache_timeout = p->lasthealth_jiffies + - msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000); + secs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL); /* Fetch new health info is its older than MIN_HEALTH_QUERY_INTERVAL */ if (time_after(jiffies, cache_timeout)) @@ -1548,7 +1510,7 @@ static const struct of_device_id papr_scm_match[] = { static struct platform_driver papr_scm_driver = { .probe = papr_scm_probe, - .remove_new = papr_scm_remove, + .remove = papr_scm_remove, .driver = { .name = "papr_scm", .of_match_table = papr_scm_match, @@ -1575,5 +1537,6 @@ static void __exit papr_scm_exit(void) module_exit(papr_scm_exit); MODULE_DEVICE_TABLE(of, papr_scm_match); +MODULE_DESCRIPTION("PAPR Storage Class Memory interface driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("IBM Corporation"); diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 1772ae3d193d..6dbc73eb2ca2 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -18,33 +18,6 @@ #include <asm/pci.h> #include "pseries.h" -#if 0 -void pcibios_name_device(struct pci_dev *dev) -{ - struct device_node *dn; - - /* - * Add IBM loc code (slot) as a prefix to the device names for service - */ - dn = pci_device_to_OF_node(dev); - if (dn) { - const char *loc_code = of_get_property(dn, "ibm,loc-code", - NULL); - if (loc_code) { - int loc_len = strlen(loc_code); - if (loc_len < sizeof(dev->dev.name)) { - memmove(dev->dev.name+loc_len+1, dev->dev.name, - sizeof(dev->dev.name)-loc_len-1); - memcpy(dev->dev.name, loc_code, loc_len); - dev->dev.name[loc_len] = ' '; - dev->dev.name[sizeof(dev->dev.name)-1] = '\0'; - } - } - } -} -DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device); -#endif - #ifdef CONFIG_PCI_IOV #define MAX_VFS_FOR_MAP_PE 256 struct pe_map_bar_entry { diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 4448386268d9..52e2623a741d 100644 --- 
a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -11,6 +11,7 @@ #include <linux/pci.h> #include <linux/export.h> +#include <linux/node.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <asm/firmware.h> @@ -21,9 +22,22 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn) { struct pci_controller *phb; + int nid; pr_debug("PCI: Initializing new hotplug PHB %pOF\n", dn); + nid = of_node_to_nid(dn); + if (likely((nid) >= 0)) { + if (!node_online(nid)) { + if (__register_one_node(nid)) { + pr_err("PCI: Failed to register node %d\n", nid); + } else { + update_numa_distance(dn); + node_set_online(nid); + } + } + } + phb = pcibios_alloc_controller(dn); if (!phb) return NULL; diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c index febe18f251d0..b1667ed05f98 100644 --- a/arch/powerpc/platforms/pseries/plpks.c +++ b/arch/powerpc/platforms/pseries/plpks.c @@ -415,8 +415,7 @@ static int plpks_confirm_object_flushed(struct label *label, break; } - usleep_range(PLPKS_FLUSH_SLEEP, - PLPKS_FLUSH_SLEEP + PLPKS_FLUSH_SLEEP_RANGE); + fsleep(PLPKS_FLUSH_SLEEP); timeout = timeout + PLPKS_FLUSH_SLEEP; } while (timeout < PLPKS_MAX_TIMEOUT); @@ -464,9 +463,10 @@ int plpks_signed_update_var(struct plpks_var *var, u64 flags) continuetoken = retbuf[0]; if (pseries_status_to_err(rc) == -EBUSY) { - int delay_ms = get_longbusy_msecs(rc); - mdelay(delay_ms); - timeout += delay_ms; + int delay_us = get_longbusy_msecs(rc) * 1000; + + fsleep(delay_us); + timeout += delay_us; } rc = pseries_status_to_err(rc); } while (rc == -EBUSY && timeout < PLPKS_MAX_TIMEOUT); @@ -683,7 +683,7 @@ void __init plpks_early_init_devtree(void) out: fdt_nop_property(fdt, chosen_node, "ibm,plpks-pw"); // Since we've cleared the password, we must update the FDT checksum - early_init_dt_verify(fdt); + early_init_dt_verify(fdt, __pa(fdt)); } static __init int pseries_plpks_init(void) diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c index 3c290b9ed01b..0f1d45f32e4a 100644 --- a/arch/powerpc/platforms/pseries/pmem.c +++ b/arch/powerpc/platforms/pseries/pmem.c @@ -121,7 +121,7 @@ int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog) return -EINVAL; } - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); lock_device_hotplug(); diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index bba4ad192b0f..3968a6970fa8 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -38,7 +38,6 @@ static inline void smp_init_pseries(void) { } #endif extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary); -void pseries_machine_kexec(struct kimage *image); extern void pSeries_final_fixup(void); diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c index b5853e9fcc3c..eceb3289383e 100644 --- a/arch/powerpc/platforms/pseries/rtas-fadump.c +++ b/arch/powerpc/platforms/pseries/rtas-fadump.c @@ -18,6 +18,7 @@ #include <asm/page.h> #include <asm/rtas.h> +#include <asm/setup.h> #include <asm/fadump.h> #include <asm/fadump-internal.h> @@ -29,9 +30,6 @@ static const struct rtas_fadump_mem_struct *fdm_active; static void rtas_fadump_update_config(struct fw_dump *fadump_conf, const struct rtas_fadump_mem_struct *fdm) { - fadump_conf->boot_mem_dest_addr = - be64_to_cpu(fdm->rmr_region.destination_address); - 
fadump_conf->fadumphdr_addr = (fadump_conf->boot_mem_dest_addr + fadump_conf->boot_memory_size); } @@ -43,20 +41,56 @@ static void rtas_fadump_update_config(struct fw_dump *fadump_conf, static void __init rtas_fadump_get_config(struct fw_dump *fadump_conf, const struct rtas_fadump_mem_struct *fdm) { - fadump_conf->boot_mem_addr[0] = - be64_to_cpu(fdm->rmr_region.source_address); - fadump_conf->boot_mem_sz[0] = be64_to_cpu(fdm->rmr_region.source_len); - fadump_conf->boot_memory_size = fadump_conf->boot_mem_sz[0]; + unsigned long base, size, last_end, hole_size; - fadump_conf->boot_mem_top = fadump_conf->boot_memory_size; - fadump_conf->boot_mem_regs_cnt = 1; + last_end = 0; + hole_size = 0; + fadump_conf->boot_memory_size = 0; + fadump_conf->boot_mem_regs_cnt = 0; + pr_debug("Boot memory regions:\n"); + for (int i = 0; i < be16_to_cpu(fdm->header.dump_num_sections); i++) { + int type = be16_to_cpu(fdm->rgn[i].source_data_type); + u64 addr; - /* - * Start address of reserve dump area (permanent reservation) for - * re-registering FADump after dump capture. - */ - fadump_conf->reserve_dump_area_start = - be64_to_cpu(fdm->cpu_state_data.destination_address); + switch (type) { + case RTAS_FADUMP_CPU_STATE_DATA: + addr = be64_to_cpu(fdm->rgn[i].destination_address); + + fadump_conf->cpu_state_dest_vaddr = (u64)__va(addr); + /* + * Start address of reserve dump area (permanent reservation) for + * re-registering FADump after dump capture. + */ + fadump_conf->reserve_dump_area_start = addr; + break; + case RTAS_FADUMP_HPTE_REGION: + /* Not processed currently. */ + break; + case RTAS_FADUMP_REAL_MODE_REGION: + base = be64_to_cpu(fdm->rgn[i].source_address); + size = be64_to_cpu(fdm->rgn[i].source_len); + pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size); + if (!base) { + fadump_conf->boot_mem_dest_addr = + be64_to_cpu(fdm->rgn[i].destination_address); + } + + fadump_conf->boot_mem_addr[fadump_conf->boot_mem_regs_cnt] = base; + fadump_conf->boot_mem_sz[fadump_conf->boot_mem_regs_cnt] = size; + fadump_conf->boot_memory_size += size; + hole_size += (base - last_end); + last_end = base + size; + fadump_conf->boot_mem_regs_cnt++; + break; + case RTAS_FADUMP_PARAM_AREA: + fadump_conf->param_area = be64_to_cpu(fdm->rgn[i].destination_address); + break; + default: + pr_warn("Section type %d unsupported on this kernel. Ignoring!\n", type); + break; + } + } + fadump_conf->boot_mem_top = fadump_conf->boot_memory_size + hole_size; rtas_fadump_update_config(fadump_conf, fdm); } @@ -64,16 +98,15 @@ static void __init rtas_fadump_get_config(struct fw_dump *fadump_conf, static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf) { u64 addr = fadump_conf->reserve_dump_area_start; + u16 sec_cnt = 0; memset(&fdm, 0, sizeof(struct rtas_fadump_mem_struct)); addr = addr & PAGE_MASK; fdm.header.dump_format_version = cpu_to_be32(0x00000001); - fdm.header.dump_num_sections = cpu_to_be16(3); fdm.header.dump_status_flag = 0; fdm.header.offset_first_dump_section = - cpu_to_be32((u32)offsetof(struct rtas_fadump_mem_struct, - cpu_state_data)); + cpu_to_be32((u32)offsetof(struct rtas_fadump_mem_struct, rgn)); /* * Fields for disk dump option. @@ -89,25 +122,22 @@ static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf) /* Kernel dump sections */ /* cpu state data section. 
*/ - fdm.cpu_state_data.request_flag = - cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); - fdm.cpu_state_data.source_data_type = - cpu_to_be16(RTAS_FADUMP_CPU_STATE_DATA); - fdm.cpu_state_data.source_address = 0; - fdm.cpu_state_data.source_len = - cpu_to_be64(fadump_conf->cpu_state_data_size); - fdm.cpu_state_data.destination_address = cpu_to_be64(addr); + fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_CPU_STATE_DATA); + fdm.rgn[sec_cnt].source_address = 0; + fdm.rgn[sec_cnt].source_len = cpu_to_be64(fadump_conf->cpu_state_data_size); + fdm.rgn[sec_cnt].destination_address = cpu_to_be64(addr); addr += fadump_conf->cpu_state_data_size; + sec_cnt++; /* hpte region section */ - fdm.hpte_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); - fdm.hpte_region.source_data_type = - cpu_to_be16(RTAS_FADUMP_HPTE_REGION); - fdm.hpte_region.source_address = 0; - fdm.hpte_region.source_len = - cpu_to_be64(fadump_conf->hpte_region_size); - fdm.hpte_region.destination_address = cpu_to_be64(addr); + fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_HPTE_REGION); + fdm.rgn[sec_cnt].source_address = 0; + fdm.rgn[sec_cnt].source_len = cpu_to_be64(fadump_conf->hpte_region_size); + fdm.rgn[sec_cnt].destination_address = cpu_to_be64(addr); addr += fadump_conf->hpte_region_size; + sec_cnt++; /* * Align boot memory area destination address to page boundary to @@ -115,14 +145,29 @@ static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf) */ addr = PAGE_ALIGN(addr); - /* RMA region section */ - fdm.rmr_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); - fdm.rmr_region.source_data_type = - cpu_to_be16(RTAS_FADUMP_REAL_MODE_REGION); - fdm.rmr_region.source_address = cpu_to_be64(0); - fdm.rmr_region.source_len = cpu_to_be64(fadump_conf->boot_memory_size); - fdm.rmr_region.destination_address = cpu_to_be64(addr); - addr += fadump_conf->boot_memory_size; + /* First boot memory region destination address */ + fadump_conf->boot_mem_dest_addr = addr; + for (int i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) { + /* Boot memory regions */ + fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_REAL_MODE_REGION); + fdm.rgn[sec_cnt].source_address = cpu_to_be64(fadump_conf->boot_mem_addr[i]); + fdm.rgn[sec_cnt].source_len = cpu_to_be64(fadump_conf->boot_mem_sz[i]); + fdm.rgn[sec_cnt].destination_address = cpu_to_be64(addr); + addr += fadump_conf->boot_mem_sz[i]; + sec_cnt++; + } + + /* Parameters area */ + if (fadump_conf->param_area) { + fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_PARAM_AREA); + fdm.rgn[sec_cnt].source_address = cpu_to_be64(fadump_conf->param_area); + fdm.rgn[sec_cnt].source_len = cpu_to_be64(COMMAND_LINE_SIZE); + fdm.rgn[sec_cnt].destination_address = cpu_to_be64(fadump_conf->param_area); + sec_cnt++; + } + fdm.header.dump_num_sections = cpu_to_be16(sec_cnt); rtas_fadump_update_config(fadump_conf, &fdm); @@ -136,14 +181,21 @@ static u64 rtas_fadump_get_bootmem_min(void) static int rtas_fadump_register(struct fw_dump *fadump_conf) { - unsigned int wait_time; + unsigned int wait_time, fdm_size; int rc, err = -EIO; + /* + * Platform requires the exact size of the Dump Memory Structure. 
+ * Avoid including any unused rgns in the calculation, as this + * could result in a parameter error (-3) from the platform. + */ + fdm_size = sizeof(struct rtas_fadump_section_header); + fdm_size += be16_to_cpu(fdm.header.dump_num_sections) * sizeof(struct rtas_fadump_section); + /* TODO: Add upper time limit for the delay */ do { rc = rtas_call(fadump_conf->ibm_configure_kernel_dump, 3, 1, - NULL, FADUMP_REGISTER, &fdm, - sizeof(struct rtas_fadump_mem_struct)); + NULL, FADUMP_REGISTER, &fdm, fdm_size); wait_time = rtas_busy_delay_time(rc); if (wait_time) @@ -161,9 +213,7 @@ static int rtas_fadump_register(struct fw_dump *fadump_conf) pr_err("Failed to register. Hardware Error(%d).\n", rc); break; case -3: - if (!is_fadump_boot_mem_contiguous()) - pr_err("Can't have holes in boot memory area.\n"); - else if (!is_fadump_reserved_mem_contiguous()) + if (!is_fadump_reserved_mem_contiguous()) pr_err("Can't have holes in reserved memory area.\n"); pr_err("Failed to register. Parameter Error(%d).\n", rc); @@ -316,11 +366,9 @@ static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf) u32 num_cpus, *note_buf; int i, rc = 0, cpu = 0; struct pt_regs regs; - unsigned long addr; void *vaddr; - addr = be64_to_cpu(fdm_active->cpu_state_data.destination_address); - vaddr = __va(addr); + vaddr = (void *)fadump_conf->cpu_state_dest_vaddr; reg_header = vaddr; if (be64_to_cpu(reg_header->magic_number) != @@ -375,11 +423,8 @@ static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf) } final_note(note_buf); - if (fdh) { - pr_debug("Updating elfcore header (%llx) with cpu notes\n", - fdh->elfcorehdr_addr); - fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr)); - } + pr_debug("Updating elfcore header (%llx) with cpu notes\n", fadump_conf->elfcorehdr_addr); + fadump_update_elfcore_header((char *)fadump_conf->elfcorehdr_addr); return 0; error_out: @@ -389,57 +434,66 @@ error_out: } /* - * Validate and process the dump data stored by firmware before exporting - * it through '/proc/vmcore'. + * Validate and process the dump data stored by the firmware, and update + * the CPU notes of elfcorehdr. */ static int __init rtas_fadump_process(struct fw_dump *fadump_conf) { - struct fadump_crash_info_header *fdh; - int rc = 0; - if (!fdm_active || !fadump_conf->fadumphdr_addr) return -EINVAL; /* Check if the dump data is valid. 
*/ - if ((be16_to_cpu(fdm_active->header.dump_status_flag) == - RTAS_FADUMP_ERROR_FLAG) || - (fdm_active->cpu_state_data.error_flags != 0) || - (fdm_active->rmr_region.error_flags != 0)) { - pr_err("Dump taken by platform is not valid\n"); - return -EINVAL; - } - if ((fdm_active->rmr_region.bytes_dumped != - fdm_active->rmr_region.source_len) || - !fdm_active->cpu_state_data.bytes_dumped) { - pr_err("Dump taken by platform is incomplete\n"); - return -EINVAL; - } + for (int i = 0; i < be16_to_cpu(fdm_active->header.dump_num_sections); i++) { + int type = be16_to_cpu(fdm_active->rgn[i].source_data_type); + int rc = 0; - /* Validate the fadump crash info header */ - fdh = __va(fadump_conf->fadumphdr_addr); - if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) { - pr_err("Crash info header is not valid.\n"); - return -EINVAL; + switch (type) { + case RTAS_FADUMP_CPU_STATE_DATA: + case RTAS_FADUMP_HPTE_REGION: + case RTAS_FADUMP_REAL_MODE_REGION: + if (fdm_active->rgn[i].error_flags != 0) { + pr_err("Dump taken by platform is not valid (%d)\n", i); + rc = -EINVAL; + } + if (fdm_active->rgn[i].bytes_dumped != fdm_active->rgn[i].source_len) { + pr_err("Dump taken by platform is incomplete (%d)\n", i); + rc = -EINVAL; + } + if (rc) { + pr_warn("Region type: %u src addr: 0x%llx dest addr: 0x%llx\n", + be16_to_cpu(fdm_active->rgn[i].source_data_type), + be64_to_cpu(fdm_active->rgn[i].source_address), + be64_to_cpu(fdm_active->rgn[i].destination_address)); + return rc; + } + break; + case RTAS_FADUMP_PARAM_AREA: + if (fdm_active->rgn[i].bytes_dumped != fdm_active->rgn[i].source_len || + fdm_active->rgn[i].error_flags != 0) { + pr_warn("Failed to process additional parameters! Proceeding anyway..\n"); + fadump_conf->param_area = 0; + } + break; + default: + /* + * If the first/crashed kernel added a new region type that the + * second/fadump kernel doesn't recognize, skip it and process + * assuming backward compatibility. + */ + pr_warn("Unknown region found: type: %u src addr: 0x%llx dest addr: 0x%llx\n", + be16_to_cpu(fdm_active->rgn[i].source_data_type), + be64_to_cpu(fdm_active->rgn[i].source_address), + be64_to_cpu(fdm_active->rgn[i].destination_address)); + break; + } } - rc = rtas_fadump_build_cpu_notes(fadump_conf); - if (rc) - return rc; - - /* - * We are done validating dump info and elfcore header is now ready - * to be exported. set elfcorehdr_addr so that vmcore module will - * export the elfcore header through '/proc/vmcore'. 
- */ - elfcorehdr_addr = fdh->elfcorehdr_addr; - - return 0; + return rtas_fadump_build_cpu_notes(fadump_conf); } static void rtas_fadump_region_show(struct fw_dump *fadump_conf, struct seq_file *m) { - const struct rtas_fadump_section *cpu_data_section; const struct rtas_fadump_mem_struct *fdm_ptr; if (fdm_active) @@ -447,27 +501,49 @@ static void rtas_fadump_region_show(struct fw_dump *fadump_conf, else fdm_ptr = &fdm; - cpu_data_section = &(fdm_ptr->cpu_state_data); - seq_printf(m, "CPU :[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", - be64_to_cpu(cpu_data_section->destination_address), - be64_to_cpu(cpu_data_section->destination_address) + - be64_to_cpu(cpu_data_section->source_len) - 1, - be64_to_cpu(cpu_data_section->source_len), - be64_to_cpu(cpu_data_section->bytes_dumped)); - - seq_printf(m, "HPTE:[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", - be64_to_cpu(fdm_ptr->hpte_region.destination_address), - be64_to_cpu(fdm_ptr->hpte_region.destination_address) + - be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1, - be64_to_cpu(fdm_ptr->hpte_region.source_len), - be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped)); - - seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ", - be64_to_cpu(fdm_ptr->rmr_region.source_address), - be64_to_cpu(fdm_ptr->rmr_region.destination_address)); - seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n", - be64_to_cpu(fdm_ptr->rmr_region.source_len), - be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped)); + + for (int i = 0; i < be16_to_cpu(fdm_ptr->header.dump_num_sections); i++) { + int type = be16_to_cpu(fdm_ptr->rgn[i].source_data_type); + + switch (type) { + case RTAS_FADUMP_CPU_STATE_DATA: + seq_printf(m, "CPU :[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", + be64_to_cpu(fdm_ptr->rgn[i].destination_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address) + + be64_to_cpu(fdm_ptr->rgn[i].source_len) - 1, + be64_to_cpu(fdm_ptr->rgn[i].source_len), + be64_to_cpu(fdm_ptr->rgn[i].bytes_dumped)); + break; + case RTAS_FADUMP_HPTE_REGION: + seq_printf(m, "HPTE:[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", + be64_to_cpu(fdm_ptr->rgn[i].destination_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address) + + be64_to_cpu(fdm_ptr->rgn[i].source_len) - 1, + be64_to_cpu(fdm_ptr->rgn[i].source_len), + be64_to_cpu(fdm_ptr->rgn[i].bytes_dumped)); + break; + case RTAS_FADUMP_REAL_MODE_REGION: + seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ", + be64_to_cpu(fdm_ptr->rgn[i].source_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address)); + seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n", + be64_to_cpu(fdm_ptr->rgn[i].source_len), + be64_to_cpu(fdm_ptr->rgn[i].bytes_dumped)); + break; + case RTAS_FADUMP_PARAM_AREA: + seq_printf(m, "\n[%#016llx-%#016llx]: cmdline append: '%s'\n", + be64_to_cpu(fdm_ptr->rgn[i].destination_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address) + + be64_to_cpu(fdm_ptr->rgn[i].source_len) - 1, + (char *)__va(be64_to_cpu(fdm_ptr->rgn[i].destination_address))); + break; + default: + seq_printf(m, "Unknown region type %d : Src: %#016llx, Dest: %#016llx, ", + type, be64_to_cpu(fdm_ptr->rgn[i].source_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address)); + break; + } + } /* Dump is active. Show preserved area start address. 
 /* Dump is active. Show preserved area start address. */
 if (fdm_active) {
@@ -483,6 +559,20 @@ static void rtas_fadump_trigger(struct fadump_crash_info_header *fdh,
 rtas_os_term((char *)msg);
 }
+/* FADUMP_MAX_MEM_REGS or lower */
+static int rtas_fadump_max_boot_mem_rgns(void)
+{
+ /*
+ * Version 1 of the Kernel Assisted Dump Memory Structure (PAPR) supports 10 sections.
+ * With one section each taken for CPU state data & HPTE, 8 sections
+ * can be used for boot memory regions.
+ *
+ * If new region(s) are defined, the maximum number of boot memory
+ * regions decreases proportionally.
+ */
+ return RTAS_FADUMP_MAX_BOOT_MEM_REGS;
+}
+
 static struct fadump_ops rtas_fadump_ops = {
 .fadump_init_mem_struct = rtas_fadump_init_mem_struct,
 .fadump_get_bootmem_min = rtas_fadump_get_bootmem_min,
@@ -492,6 +582,7 @@ static struct fadump_ops rtas_fadump_ops = {
 .fadump_process = rtas_fadump_process,
 .fadump_region_show = rtas_fadump_region_show,
 .fadump_trigger = rtas_fadump_trigger,
+ .fadump_max_boot_mem_rgns = rtas_fadump_max_boot_mem_rgns,
 };

 void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
@@ -508,9 +599,10 @@ void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
 if (!token)
 return;

- fadump_conf->ibm_configure_kernel_dump = be32_to_cpu(*token);
- fadump_conf->ops = &rtas_fadump_ops;
- fadump_conf->fadump_supported = 1;
+ fadump_conf->ibm_configure_kernel_dump = be32_to_cpu(*token);
+ fadump_conf->ops = &rtas_fadump_ops;
+ fadump_conf->fadump_supported = 1;
+ fadump_conf->param_area_supported = 1;

 /* Firmware supports 64-bit value for size, align it to pagesize. */
 fadump_conf->max_copy_size = ALIGN_DOWN(U64_MAX, PAGE_SIZE);
diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.h b/arch/powerpc/platforms/pseries/rtas-fadump.h
index fd59bd7ca9c3..c109abf6befd 100644
--- a/arch/powerpc/platforms/pseries/rtas-fadump.h
+++ b/arch/powerpc/platforms/pseries/rtas-fadump.h
@@ -23,12 +23,24 @@
 #define RTAS_FADUMP_HPTE_REGION 0x0002
 #define RTAS_FADUMP_REAL_MODE_REGION 0x0011

+/* OS defined sections */
+#define RTAS_FADUMP_PARAM_AREA 0x0100
+
 /* Dump request flag */
 #define RTAS_FADUMP_REQUEST_FLAG 0x00000001

 /* Dump status flag */
 #define RTAS_FADUMP_ERROR_FLAG 0x2000

+/*
+ * The Firmware Assisted Dump Memory structure supports a maximum of 10 sections
+ * in the dump memory structure. Presently, three sections are used for
+ * CPU state data, HPTE & the parameter area, while the remaining seven sections
+ * can be used for boot memory regions.
+ */
+#define MAX_SECTIONS 10
+#define RTAS_FADUMP_MAX_BOOT_MEM_REGS 7
+
 /* Kernel Dump section info */
 struct rtas_fadump_section {
 __be32 request_flag;
@@ -61,20 +73,15 @@ struct rtas_fadump_section_header {
 * Firmware Assisted dump memory structure. This structure is required for
 * registering future kernel dump with power firmware through rtas call.
 *
- * No disk dump option. Hence disk dump path string section is not included.
+ * In version 1, the platform permits one section header, dump-disk path
+ * and ten sections.
+ *
+ * Note: No disk dump option. Hence disk dump path string section is not
+ * included.
 */
 struct rtas_fadump_mem_struct {
 struct rtas_fadump_section_header header;
-
- /* Kernel dump sections */
- struct rtas_fadump_section cpu_state_data;
- struct rtas_fadump_section hpte_region;
-
- /*
- * TODO: Extend multiple boot memory regions support in the kernel
- * for this platform.
- */
- struct rtas_fadump_section rmr_region;
+ struct rtas_fadump_section rgn[MAX_SECTIONS];
 };
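The new rtas_fadump_max_boot_mem_rgns() and the rtas-fadump.h comment both encode one piece of arithmetic: of the 10 sections version 1 supports, CPU state data, HPTE and the parameter area consume one each, leaving 7 for boot memory. A hypothetical compile-time check of that budget (not part of the patch; RESERVED_SECTIONS is an invented name, the other two macros mirror the header):

#include <assert.h>
#include <stdio.h>

#define MAX_SECTIONS 10
#define RTAS_FADUMP_MAX_BOOT_MEM_REGS 7
/* One section each for CPU state data, HPTE and the parameter area. */
#define RESERVED_SECTIONS 3

static_assert(RTAS_FADUMP_MAX_BOOT_MEM_REGS == MAX_SECTIONS - RESERVED_SECTIONS,
	      "boot memory regions must fit in the sections left over");

int main(void)
{
	printf("%d of %d sections available for boot memory\n",
	       RTAS_FADUMP_MAX_BOOT_MEM_REGS, MAX_SECTIONS);
	return 0;
}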

 /*
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 284a6fa04b0c..b10a25325238 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -343,8 +343,8 @@ static int alloc_dispatch_log_kmem_cache(void)
 {
 void (*ctor)(void *) = get_dtl_cache_ctor();

- dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
- DISPATCH_LOG_BYTES, 0, ctor);
+ dtl_cache = kmem_cache_create_usercopy("dtl", DISPATCH_LOG_BYTES,
+ DISPATCH_LOG_BYTES, 0, 0, DISPATCH_LOG_BYTES, ctor);
 if (!dtl_cache) {
 pr_warn("Failed to create dispatch trace log buffer cache\n");
 pr_warn("Stolen time statistics will be unreliable\n");
@@ -1159,7 +1159,6 @@ define_machine(pseries) {
 .machine_check_exception = pSeries_machine_check_exception,
 .machine_check_log_err = pSeries_machine_check_log_err,
 #ifdef CONFIG_KEXEC_CORE
- .machine_kexec = pseries_machine_kexec,
 .kexec_cpu_down = pseries_kexec_cpu_down,
 #endif
 #ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index c597711ef20a..db99725e752b 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -39,7 +39,7 @@
 #include <asm/xive.h>
 #include <asm/dbell.h>
 #include <asm/plpar_wrappers.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/svm.h>
 #include <asm/kvm_guest.h>
diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
index 3b4045d508ec..384c9dc1899a 100644
--- a/arch/powerpc/platforms/pseries/svm.c
+++ b/arch/powerpc/platforms/pseries/svm.c
@@ -8,6 +8,7 @@

 #include <linux/mm.h>
 #include <linux/memblock.h>
+#include <linux/mem_encrypt.h>
 #include <linux/cc_platform.h>
 #include <asm/machdep.h>
 #include <asm/svm.h>
diff --git a/arch/powerpc/platforms/pseries/vas-sysfs.c b/arch/powerpc/platforms/pseries/vas-sysfs.c
index f9f682724e77..9e05a0e99cad 100644
--- a/arch/powerpc/platforms/pseries/vas-sysfs.c
+++ b/arch/powerpc/platforms/pseries/vas-sysfs.c
@@ -162,13 +162,13 @@ static const struct sysfs_ops vas_sysfs_ops = {
 .store = vas_type_store,
 };

-static struct kobj_type vas_def_attr_type = {
+static const struct kobj_type vas_def_attr_type = {
 .release = vas_type_release,
 .sysfs_ops = &vas_sysfs_ops,
 .default_groups = vas_def_capab_groups,
 };

-static struct kobj_type vas_qos_attr_type = {
+static const struct kobj_type vas_qos_attr_type = {
 .release = vas_type_release,
 .sysfs_ops = &vas_sysfs_ops,
 .default_groups = vas_qos_capab_groups,
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
index 71d52a670d95..c25eb1a38185 100644
--- a/arch/powerpc/platforms/pseries/vas.c
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -38,7 +38,27 @@ static long hcall_return_busy_check(long rc)
 {
 /* Check if we are stalled for some time */
 if (H_IS_LONG_BUSY(rc)) {
- msleep(get_longbusy_msecs(rc));
+ unsigned int ms;
+ /*
+ * Allocate, Modify and Deallocate HCALLs return
+ * H_LONG_BUSY_ORDER_1_MSEC or H_LONG_BUSY_ORDER_10_MSEC
+ * for the long delay. So the sleep time should always
+ * be either 1 or 10 msecs, but if the HCALL returns
+ * a delay longer than 10 msecs, clamp the sleep
+ * time to 10 msecs.
+ */
+ ms = clamp(get_longbusy_msecs(rc), 1, 10);
+
+ /*
+ * msleep() will often sleep at least 20 msecs even
+ * though the hypervisor suggests that the OS reissue
+ * HCALLs after 1 or 10 msecs. Also, the delay hint from
+ * the HCALL is just a suggestion, so it is OK to pause for
+ * less time than the hinted delay. Use usleep_range()
+ * to ensure we don't sleep much longer than actually
+ * needed.
+ */
+ usleep_range(ms * (USEC_PER_MSEC / 10), ms * USEC_PER_MSEC);
 rc = H_BUSY;
 } else if (rc == H_BUSY) {
 cond_resched();
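The hcall_return_busy_check() hunk above clamps the hypervisor's delay hint to between 1 and 10 msecs and then sleeps somewhere between a tenth of the hint and the full hint. A user-space approximation of that backoff (assumption: nanosleep() stands in for the kernel's usleep_range(), so this sketch simply sleeps the lower bound rather than waking anywhere in the range):

#include <stdio.h>
#include <time.h>

#define USEC_PER_MSEC 1000L

/* Clamp the hint to [1, 10] ms, then sleep the lower bound of the range. */
static void backoff(int hint_ms)
{
	long ms = hint_ms < 1 ? 1 : (hint_ms > 10 ? 10 : hint_ms);
	long min_us = ms * (USEC_PER_MSEC / 10);	/* 10% of the hint */
	long max_us = ms * USEC_PER_MSEC;		/* the full hint */
	struct timespec ts = { .tv_sec = 0, .tv_nsec = min_us * 1000L };

	printf("hint %d ms -> sleep in [%ld, %ld] us\n", hint_ms, min_us, max_us);
	nanosleep(&ts, NULL);	/* usleep_range() may wake anywhere in range */
}

int main(void)
{
	backoff(1);	/* H_LONG_BUSY_ORDER_1_MSEC */
	backoff(10);	/* H_LONG_BUSY_ORDER_10_MSEC */
	backoff(100);	/* out-of-spec hint, clamped to 10 ms */
	return 0;
}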
@@ -228,7 +248,7 @@ static irqreturn_t pseries_vas_irq_handler(int irq, void *data)
 struct pseries_vas_window *txwin = data;

 /*
- * The thread hanlder will process this interrupt if it is
+ * The thread handler will process this interrupt if it is
 * already running.
 */
 atomic_inc(&txwin->pending_faults);
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 90ff85c879bf..ac1d2d2c9a88 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -611,7 +611,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
 .get_required_mask = dma_iommu_get_required_mask,
 .mmap = dma_common_mmap,
 .get_sgtable = dma_common_get_sgtable,
- .alloc_pages = dma_common_alloc_pages,
+ .alloc_pages_op = dma_common_alloc_pages,
 .free_pages = dma_common_free_pages,
 };

@@ -1576,10 +1576,10 @@ void vio_unregister_device(struct vio_dev *viodev)
 }
 EXPORT_SYMBOL(vio_unregister_device);

-static int vio_bus_match(struct device *dev, struct device_driver *drv)
+static int vio_bus_match(struct device *dev, const struct device_driver *drv)
 {
 const struct vio_dev *vio_dev = to_vio_dev(dev);
- struct vio_driver *vio_drv = to_vio_driver(drv);
+ const struct vio_driver *vio_drv = to_vio_driver(drv);
 const struct vio_device_id *ids = vio_drv->id_table;

 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
@@ -1592,13 +1592,9 @@ static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env)
 const char *cp;

 dn = dev->of_node;
- if (!dn)
- return -ENODEV;
- cp = of_get_property(dn, "compatible", NULL);
- if (!cp)
- return -ENODEV;
+ if (dn && (cp = of_get_property(dn, "compatible", NULL)))
+ add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);

- add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
 return 0;
 }

@@ -1693,7 +1689,7 @@ struct vio_dev *vio_find_node(struct device_node *vnode)
 /* construct the kobject name from the device node */
 if (of_node_is_type(vnode_parent, "vdevice")) {
 const __be32 *prop;
-
+
 prop = of_get_property(vnode, "reg", NULL);
 if (!prop)
 goto out;
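The vio_hotplug() change above folds the two -ENODEV early returns into a single guard: the MODALIAS variable, of the form vio:T<type>S<compatible>, is emitted only when the OF node and its "compatible" property both exist, and the function now reports success either way. A sketch of just the string construction (invented example values; snprintf stands in for add_uevent_var):

#include <stdio.h>

int main(void)
{
	/* Invented example values; real ones come from the device tree. */
	const char *type = "network";		/* stands in for vio_dev->type */
	const char *compat = "IBM,l-lan";	/* "compatible" property value */
	char modalias[128];

	snprintf(modalias, sizeof(modalias), "vio:T%sS%s", type, compat);
	printf("MODALIAS=%s\n", modalias);	/* -> MODALIAS=vio:TnetworkSIBM,l-lan */
	return 0;
}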