From 8d0ea29db5aefd0d94fa4b6ca6124c68998f3c6a Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Wed, 15 Apr 2020 22:58:40 -0700 Subject: powerpc/xive: Define xive_native_alloc_irq_on_chip() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function allocates IRQ on a specific chip. VAS needs per chip IRQ allocation and will have IRQ handler per VAS instance. Signed-off-by: Haren Myneni Reviewed-by: Cédric Le Goater Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587016720.2275.1047.camel@hbabu-laptop --- arch/powerpc/include/asm/xive.h | 9 ++++++++- arch/powerpc/sysdev/xive/native.c | 6 +++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index 93f982dbb3d4..d08ea11b271c 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -5,6 +5,8 @@ #ifndef _ASM_POWERPC_XIVE_H #define _ASM_POWERPC_XIVE_H +#include + #define XIVE_INVALID_VP 0xffffffff #ifdef CONFIG_PPC_XIVE @@ -108,7 +110,6 @@ void xive_native_free_vp_block(u32 vp_base); int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data); void xive_cleanup_irq_data(struct xive_irq_data *xd); -u32 xive_native_alloc_irq(void); void xive_native_free_irq(u32 irq); int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq); @@ -137,6 +138,12 @@ int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle, u32 qindex); int xive_native_get_vp_state(u32 vp_id, u64 *out_state); bool xive_native_has_queue_state_support(void); +extern u32 xive_native_alloc_irq_on_chip(u32 chip_id); + +static inline u32 xive_native_alloc_irq(void) +{ + return xive_native_alloc_irq_on_chip(OPAL_XIVE_ANY_CHIP); +} #else diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 5218fdc4b29a..71b881e554fc 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -280,12 +280,12 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc) } #endif /* CONFIG_SMP */ -u32 xive_native_alloc_irq(void) +u32 xive_native_alloc_irq_on_chip(u32 chip_id) { s64 rc; for (;;) { - rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP); + rc = opal_xive_allocate_irq(chip_id); if (rc != OPAL_BUSY) break; msleep(OPAL_BUSY_DELAY_MS); @@ -294,7 +294,7 @@ u32 xive_native_alloc_irq(void) return 0; return rc; } -EXPORT_SYMBOL_GPL(xive_native_alloc_irq); +EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip); void xive_native_free_irq(u32 irq) { -- cgit v1.2.3-59-g8ed1b From 73a8077938824bf13dd9b8557545be91012ffc9c Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Wed, 15 Apr 2020 22:59:29 -0700 Subject: powerpc/vas: Define nx_fault_stamp in coprocessor_request_block Kernel sets fault address and status in CRB for NX page fault on user space address after processing page fault. User space gets the signal and handles the fault mentioned in CRB by bringing the page in to memory and send NX request again. 
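For illustration, the user space side of this contract might look like the
following sketch. The structure and the CSB_V / CSB_CC_TRANSLATION values
are paraphrased from asm/icswx.h; wait_for_csb() and resubmit() are
hypothetical helper names, not part of this series:

	#include <stdint.h>

	/* Mirror of struct coprocessor_status_block; fields are big-endian. */
	struct csb {
		uint8_t  flags;
		uint8_t  cs;
		uint8_t  cc;
		uint8_t  ce;
		uint32_t count;
		uint64_t error;
		uint64_t address;	/* fault address stamped by the kernel */
	};

	#define CSB_V			0x80
	#define CSB_CC_TRANSLATION	5

	/* Poll for completion; on a translation fault, touch the faulting
	 * page to bring it into memory and paste the CRB again. */
	static void wait_for_csb(volatile struct csb *csb, void (*resubmit)(void))
	{
		while (!(csb->flags & CSB_V))
			;	/* a real application would pause or yield here */

		if (csb->cc == CSB_CC_TRANSLATION) {
			/* NX/kernel store the address big-endian; the bswap
			 * assumes a little-endian (ppc64le) host. */
			char *fault = (char *)(uintptr_t)__builtin_bswap64(csb->address);

			*(volatile char *)fault;	/* fault the page in */
			csb->flags = 0;			/* re-arm the CSB */
			resubmit();			/* send the NX request again */
		}
	}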
Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Haren Myneni Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587016769.2275.1048.camel@hbabu-laptop --- arch/powerpc/include/asm/icswx.h | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h index 9872f85d356f..965b1f39b2a5 100644 --- a/arch/powerpc/include/asm/icswx.h +++ b/arch/powerpc/include/asm/icswx.h @@ -108,6 +108,17 @@ struct data_descriptor_entry { __be64 address; } __packed __aligned(DDE_ALIGN); +/* 4.3.2 NX-stamped Fault CRB */ + +#define NX_STAMP_ALIGN (0x10) + +struct nx_fault_stamp { + __be64 fault_storage_addr; + __be16 reserved; + __u8 flags; + __u8 fault_status; + __be32 pswid; +} __packed __aligned(NX_STAMP_ALIGN); /* Chapter 6.5.2 Coprocessor-Request Block (CRB) */ @@ -135,10 +146,15 @@ struct coprocessor_request_block { struct coprocessor_completion_block ccb; - u8 reserved[48]; + union { + struct nx_fault_stamp nx; + u8 reserved[16]; + } stamp; + + u8 reserved[32]; struct coprocessor_status_block csb; -} __packed __aligned(CRB_ALIGN); +} __packed; /* RFC02167 Initiate Coprocessor Instructions document -- cgit v1.2.3-59-g8ed1b From c20e1e299d936c7f3089c9be99fa784e57da5b7f Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Wed, 15 Apr 2020 23:00:06 -0700 Subject: powerpc/vas: Alloc and setup IRQ and trigger port address MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allocate a xive irq on each chip with a vas instance. The NX coprocessor raises a host CPU interrupt via vas if it encounters page fault on user space request buffer. Subsequent patches register the trigger port with the NX coprocessor, and create a vas fault handler for this interrupt mapping. 
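In outline, the per-instance setup added below reduces to this sequence
(error handling omitted; all names come from this patch and the xive patch
above):

	u32 hwirq;
	struct xive_irq_data *xd;

	hwirq = xive_native_alloc_irq_on_chip(chipid);	/* chip-local hw IRQ */
	vinst->virq = irq_create_mapping(NULL, hwirq);	/* Linux IRQ number */
	xd = irq_get_handler_data(vinst->virq);
	vinst->irq_port = xd->trig_page;	/* NX writes here to raise the IRQ */

The saved trigger page address (irq_port) is what the subsequent patches
hand to NX so that the coprocessor can raise this interrupt when it hits a
page fault.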
Signed-off-by: Haren Myneni Reviewed-by: Cédric Le Goater Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587016806.2275.1050.camel@hbabu-laptop --- arch/powerpc/platforms/powernv/vas.c | 44 +++++++++++++++++++++++++++++++----- arch/powerpc/platforms/powernv/vas.h | 2 ++ 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c index ed9cc6df329a..3303cfe4cfda 100644 --- a/arch/powerpc/platforms/powernv/vas.c +++ b/arch/powerpc/platforms/powernv/vas.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "vas.h" @@ -25,10 +26,12 @@ static DEFINE_PER_CPU(int, cpu_vas_id); static int init_vas_instance(struct platform_device *pdev) { - int rc, cpu, vasid; - struct resource *res; - struct vas_instance *vinst; struct device_node *dn = pdev->dev.of_node; + struct vas_instance *vinst; + struct xive_irq_data *xd; + uint32_t chipid, hwirq; + struct resource *res; + int rc, cpu, vasid; rc = of_property_read_u32(dn, "ibm,vas-id", &vasid); if (rc) { @@ -36,6 +39,12 @@ static int init_vas_instance(struct platform_device *pdev) return -ENODEV; } + rc = of_property_read_u32(dn, "ibm,chip-id", &chipid); + if (rc) { + pr_err("No ibm,chip-id property for %s?\n", pdev->name); + return -ENODEV; + } + if (pdev->num_resources != 4) { pr_err("Unexpected DT configuration for [%s, %d]\n", pdev->name, vasid); @@ -69,9 +78,32 @@ static int init_vas_instance(struct platform_device *pdev) vinst->paste_win_id_shift = 63 - res->end; - pr_devel("Initialized instance [%s, %d], paste_base 0x%llx, " - "paste_win_id_shift 0x%llx\n", pdev->name, vasid, - vinst->paste_base_addr, vinst->paste_win_id_shift); + hwirq = xive_native_alloc_irq_on_chip(chipid); + if (!hwirq) { + pr_err("Inst%d: Unable to allocate global irq for chip %d\n", + vinst->vas_id, chipid); + return -ENOENT; + } + + vinst->virq = irq_create_mapping(NULL, hwirq); + if (!vinst->virq) { + pr_err("Inst%d: Unable to map global irq %d\n", + vinst->vas_id, hwirq); + return -EINVAL; + } + + xd = irq_get_handler_data(vinst->virq); + if (!xd) { + pr_err("Inst%d: Invalid virq %d\n", + vinst->vas_id, vinst->virq); + return -EINVAL; + } + + vinst->irq_port = xd->trig_page; + pr_devel("Initialized instance [%s, %d] paste_base 0x%llx paste_win_id_shift 0x%llx IRQ %d Port 0x%llx\n", + pdev->name, vasid, vinst->paste_base_addr, + vinst->paste_win_id_shift, vinst->virq, + vinst->irq_port); for_each_possible_cpu(cpu) { if (cpu_to_chip_id(cpu) == of_get_ibm_chip_id(dn)) diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h index 5574aec9ee88..598608b53fab 100644 --- a/arch/powerpc/platforms/powernv/vas.h +++ b/arch/powerpc/platforms/powernv/vas.h @@ -313,6 +313,8 @@ struct vas_instance { u64 paste_base_addr; u64 paste_win_id_shift; + u64 irq_port; + int virq; struct mutex mutex; struct vas_window *rxwin[VAS_COP_TYPE_MAX]; struct vas_window *windows[VAS_WINDOWS_PER_CHIP]; -- cgit v1.2.3-59-g8ed1b From 0d17de03ce6a7a9b08c868211f1e9a7cf1ced8c4 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Wed, 15 Apr 2020 23:00:46 -0700 Subject: powerpc/vas: Setup fault window per VAS instance Setup fault window for each VAS instance. When NX gets a fault on request buffer, pastes fault CRB in the corresponding fault FIFO and then raises an interrupt to the OS. The kernel handles this fault and process faults CRB from this FIFO. 
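The fault FIFO sizing used below is simple arithmetic; a sketch using the
constants from this patch (a CRB is 128 bytes):

	#define CRB_SIZE			128
	#define VAS_FAULT_WIN_FIFO_SIZE		(4 << 20)	/* 4MB */

	/* One credit per CRB slot: (4 << 20) / 128 = 32768 fault credits. */
	int max_creds = VAS_FAULT_WIN_FIFO_SIZE / CRB_SIZE;

With an 8MB FIFO the quotient would be 65536, which no longer fits in the
16-bit receive credits field, hence the 0xffff cap noted in the code below.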
Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Haren Myneni Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587016846.2275.1053.camel@hbabu-laptop --- arch/powerpc/platforms/powernv/Makefile | 2 +- arch/powerpc/platforms/powernv/vas-fault.c | 77 +++++++++++++++++++++++++++++ arch/powerpc/platforms/powernv/vas-window.c | 4 +- arch/powerpc/platforms/powernv/vas.c | 20 ++++++++ arch/powerpc/platforms/powernv/vas.h | 21 ++++++++ 5 files changed, 121 insertions(+), 3 deletions(-) create mode 100644 arch/powerpc/platforms/powernv/vas-fault.c diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index c0f8120045c3..395789ffc482 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -17,7 +17,7 @@ obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o obj-$(CONFIG_OPAL_PRD) += opal-prd.o obj-$(CONFIG_PERF_EVENTS) += opal-imc.o obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o -obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o +obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o vas-fault.o obj-$(CONFIG_OCXL_BASE) += ocxl.o obj-$(CONFIG_SCOM_DEBUGFS) += opal-xscom.o obj-$(CONFIG_PPC_SECURE_BOOT) += opal-secvar.o diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c new file mode 100644 index 000000000000..404499875525 --- /dev/null +++ b/arch/powerpc/platforms/powernv/vas-fault.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * VAS Fault handling. + * Copyright 2019, IBM Corporation + */ + +#define pr_fmt(fmt) "vas: " fmt + +#include +#include +#include +#include +#include +#include + +#include "vas.h" + +/* + * The maximum FIFO size for fault window can be 8MB + * (VAS_RX_FIFO_SIZE_MAX). Using 4MB FIFO since each VAS + * instance will be having fault window. + * 8MB FIFO can be used if expects more faults for each VAS + * instance. + */ +#define VAS_FAULT_WIN_FIFO_SIZE (4 << 20) + +/* + * Fault window is opened per VAS instance. NX pastes fault CRB in fault + * FIFO upon page faults. + */ +int vas_setup_fault_window(struct vas_instance *vinst) +{ + struct vas_rx_win_attr attr; + + vinst->fault_fifo_size = VAS_FAULT_WIN_FIFO_SIZE; + vinst->fault_fifo = kzalloc(vinst->fault_fifo_size, GFP_KERNEL); + if (!vinst->fault_fifo) { + pr_err("Unable to alloc %d bytes for fault_fifo\n", + vinst->fault_fifo_size); + return -ENOMEM; + } + + /* + * Invalidate all CRB entries. NX pastes valid entry for each fault. + */ + memset(vinst->fault_fifo, FIFO_INVALID_ENTRY, vinst->fault_fifo_size); + vas_init_rx_win_attr(&attr, VAS_COP_TYPE_FAULT); + + attr.rx_fifo_size = vinst->fault_fifo_size; + attr.rx_fifo = vinst->fault_fifo; + + /* + * Max creds is based on number of CRBs can fit in the FIFO. + * (fault_fifo_size/CRB_SIZE). If 8MB FIFO is used, max creds + * will be 0xffff since the receive creds field is 16bits wide. 
+ */ + attr.wcreds_max = vinst->fault_fifo_size / CRB_SIZE; + attr.lnotify_lpid = 0; + attr.lnotify_pid = mfspr(SPRN_PID); + attr.lnotify_tid = mfspr(SPRN_PID); + + vinst->fault_win = vas_rx_win_open(vinst->vas_id, VAS_COP_TYPE_FAULT, + &attr); + + if (IS_ERR(vinst->fault_win)) { + pr_err("VAS: Error %ld opening FaultWin\n", + PTR_ERR(vinst->fault_win)); + kfree(vinst->fault_fifo); + return PTR_ERR(vinst->fault_win); + } + + pr_devel("VAS: Created FaultWin %d, LPID/PID/TID [%d/%d/%d]\n", + vinst->fault_win->winid, attr.lnotify_lpid, + attr.lnotify_pid, attr.lnotify_tid); + + return 0; +} diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index 0c0d27d17976..1783fa9b4298 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -827,9 +827,9 @@ void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop) rxattr->fault_win = true; rxattr->notify_disable = true; rxattr->rx_wcred_mode = true; - rxattr->tx_wcred_mode = true; rxattr->rx_win_ord_mode = true; - rxattr->tx_win_ord_mode = true; + rxattr->rej_no_credit = true; + rxattr->tc_mode = VAS_THRESH_DISABLED; } else if (cop == VAS_COP_TYPE_FTW) { rxattr->user_win = true; rxattr->intr_disable = true; diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c index 3303cfe4cfda..9013a6344aec 100644 --- a/arch/powerpc/platforms/powernv/vas.c +++ b/arch/powerpc/platforms/powernv/vas.c @@ -24,6 +24,11 @@ static LIST_HEAD(vas_instances); static DEFINE_PER_CPU(int, cpu_vas_id); +static int vas_irq_fault_window_setup(struct vas_instance *vinst) +{ + return vas_setup_fault_window(vinst); +} + static int init_vas_instance(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; @@ -114,6 +119,21 @@ static int init_vas_instance(struct platform_device *pdev) list_add(&vinst->node, &vas_instances); mutex_unlock(&vas_mutex); + /* + * IRQ and fault handling setup is needed only for user space + * send windows. + */ + if (vinst->virq) { + rc = vas_irq_fault_window_setup(vinst); + /* + * Fault window is used only for user space send windows. + * So if vinst->virq is NULL, tx_win_open returns -ENODEV + * for user space. + */ + if (rc) + vinst->virq = 0; + } + vas_instance_init_dbgdir(vinst); dev_set_drvdata(&pdev->dev, vinst); diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h index 598608b53fab..9c8e3f588ada 100644 --- a/arch/powerpc/platforms/powernv/vas.h +++ b/arch/powerpc/platforms/powernv/vas.h @@ -295,6 +295,22 @@ enum vas_notify_after_count { VAS_NOTIFY_AFTER_2 }; +/* + * NX can generate an interrupt for multiple faults and expects kernel + * to process all of them. So read all valid CRB entries until find the + * invalid one. So use pswid which is pasted by NX and ccw[0] (reserved + * bit in BE) to check valid CRB. CCW[0] will not be touched by user + * space. Application gets CRB formt error if it updates this bit. + * + * Invalidate FIFO during allocation and process all entries from last + * successful read until finds invalid pswid and ccw[0] values. + * After reading each CRB entry from fault FIFO, the kernel invalidate + * it by updating pswid with FIFO_INVALID_ENTRY and CCW[0] with + * CCW0_INVALID. + */ +#define FIFO_INVALID_ENTRY 0xffffffff +#define CCW0_INVALID 1 + /* * One per instance of VAS. Each instance will have a separate set of * receive windows, one per coprocessor type. 
@@ -315,6 +331,10 @@ struct vas_instance { u64 irq_port; int virq; + int fault_fifo_size; + void *fault_fifo; + struct vas_window *fault_win; /* Fault window */ + struct mutex mutex; struct vas_window *rxwin[VAS_COP_TYPE_MAX]; struct vas_window *windows[VAS_WINDOWS_PER_CHIP]; @@ -408,6 +428,7 @@ extern void vas_init_dbgdir(void); extern void vas_instance_init_dbgdir(struct vas_instance *vinst); extern void vas_window_init_dbgdir(struct vas_window *win); extern void vas_window_free_dbgdir(struct vas_window *win); +extern int vas_setup_fault_window(struct vas_instance *vinst); static inline void vas_log_write(struct vas_window *win, char *name, void *regptr, u64 val) -- cgit v1.2.3-59-g8ed1b From 8b8a73dc7919489b54ccd2772b2948088d3dbe6d Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Wed, 15 Apr 2020 23:01:28 -0700 Subject: powerpc/vas: Register NX with fault window ID and IRQ port value For each user space send window, register NX with fault window ID and port value so that NX paste CRBs in this fault FIFO when it sees fault on the request buffer. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Haren Myneni Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587016888.2275.1054.camel@hbabu-laptop --- arch/powerpc/platforms/powernv/vas-window.c | 15 +++++++++++++-- arch/powerpc/platforms/powernv/vas.h | 15 +++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index 1783fa9b4298..dc46bf6d6149 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -373,7 +373,7 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx) init_xlate_regs(window, winctx->user_win); val = 0ULL; - val = SET_FIELD(VAS_FAULT_TX_WIN, val, 0); + val = SET_FIELD(VAS_FAULT_TX_WIN, val, winctx->fault_win_id); write_hvwc_reg(window, VREG(FAULT_TX_WIN), val); /* In PowerNV, interrupts go to HV. */ @@ -748,6 +748,8 @@ static void init_winctx_for_rxwin(struct vas_window *rxwin, winctx->min_scope = VAS_SCOPE_LOCAL; winctx->max_scope = VAS_SCOPE_VECTORED_GROUP; + if (rxwin->vinst->virq) + winctx->irq_port = rxwin->vinst->irq_port; } static bool rx_win_args_valid(enum vas_cop_type cop, @@ -944,13 +946,22 @@ static void init_winctx_for_txwin(struct vas_window *txwin, winctx->lpid = txattr->lpid; winctx->pidr = txattr->pidr; winctx->rx_win_id = txwin->rxwin->winid; + /* + * IRQ and fault window setup is successful. Set fault window + * for the send window so that ready to handle faults. + */ + if (txwin->vinst->virq) + winctx->fault_win_id = txwin->vinst->fault_win->winid; winctx->dma_type = VAS_DMA_TYPE_INJECT; winctx->tc_mode = txattr->tc_mode; winctx->min_scope = VAS_SCOPE_LOCAL; winctx->max_scope = VAS_SCOPE_VECTORED_GROUP; + if (txwin->vinst->virq) + winctx->irq_port = txwin->vinst->irq_port; - winctx->pswid = 0; + winctx->pswid = txattr->pswid ? 
txattr->pswid : + encode_pswid(txwin->vinst->vas_id, txwin->winid); } static bool tx_win_args_valid(enum vas_cop_type cop, diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h index 9c8e3f588ada..88d084d3bfd9 100644 --- a/arch/powerpc/platforms/powernv/vas.h +++ b/arch/powerpc/platforms/powernv/vas.h @@ -467,6 +467,21 @@ static inline u64 read_hvwc_reg(struct vas_window *win, return in_be64(win->hvwc_map+reg); } +/* + * Encode/decode the Partition Send Window ID (PSWID) for a window in + * a way that we can uniquely identify any window in the system. i.e. + * we should be able to locate the 'struct vas_window' given the PSWID. + * + * Bits Usage + * 0:7 VAS id (8 bits) + * 8:15 Unused, 0 (3 bits) + * 16:31 Window id (16 bits) + */ +static inline u32 encode_pswid(int vasid, int winid) +{ + return ((u32)winid | (vasid << (31 - 7))); +} + static inline void decode_pswid(u32 pswid, int *vasid, int *winid) { if (vasid) -- cgit v1.2.3-59-g8ed1b From db1c08a7406351673c52594f5c8a65829baf5bf6 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Wed, 15 Apr 2020 23:02:16 -0700 Subject: powerpc/vas: Take reference to PID and mm for user space windows When process opens a window, its pid and tgid will be saved in the vas_window struct. This window will be closed when the process exits. The kernel handles NX faults by updating CSB or send SEGV signal to pid of the process if the userspace csb addr is invalid. In multi-thread applications, a window can be opened by a child thread, but it will not be closed when this thread exits. It is expected that the parent will clean up all resources including NX windows opened by child threads. A child thread can send NX requests using this window and could be killed before completion is reported. If the pid assigned to this thread is reused while requests are pending, a failure SEGV would be directed to the wrong place. To prevent reusing the pid, take references to pid and mm when the window is opened and release them when when the window is closed. Then if child thread is not running, SEGV signal will be sent to thread group leader (tgid). Signed-off-by: Haren Myneni Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587016936.2275.1057.camel@hbabu-laptop --- arch/powerpc/platforms/powernv/vas-debug.c | 2 +- arch/powerpc/platforms/powernv/vas-window.c | 50 ++++++++++++++++++++++++++--- arch/powerpc/platforms/powernv/vas.h | 9 +++++- 3 files changed, 55 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c index 44035a3d6414..41fa90d2f4ab 100644 --- a/arch/powerpc/platforms/powernv/vas-debug.c +++ b/arch/powerpc/platforms/powernv/vas-debug.c @@ -38,7 +38,7 @@ static int info_show(struct seq_file *s, void *private) seq_printf(s, "Type: %s, %s\n", cop_to_str(window->cop), window->tx_win ? 
"Send" : "Receive"); - seq_printf(s, "Pid : %d\n", window->pid); + seq_printf(s, "Pid : %d\n", vas_window_pid(window)); unlock: mutex_unlock(&vas_mutex); diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index dc46bf6d6149..063cda2b540f 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include #include #include "vas.h" @@ -876,8 +878,6 @@ struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop, rxwin->user_win = rxattr->user_win; rxwin->cop = cop; rxwin->wcreds_max = rxattr->wcreds_max ?: VAS_WCREDS_DEFAULT; - if (rxattr->user_win) - rxwin->pid = task_pid_vnr(current); init_winctx_for_rxwin(rxwin, rxattr, &winctx); init_winctx_regs(rxwin, &winctx); @@ -1027,7 +1027,6 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, txwin->tx_win = 1; txwin->rxwin = rxwin; txwin->nx_win = txwin->rxwin->nx_win; - txwin->pid = attr->pid; txwin->user_win = attr->user_win; txwin->wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT; @@ -1057,6 +1056,40 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, rc = set_thread_uses_vas(); if (rc) goto free_window; + + /* + * Window opened by a child thread may not be closed when + * it exits. So take reference to its pid and release it + * when the window is free by parent thread. + * Acquire a reference to the task's pid to make sure + * pid will not be re-used - needed only for multithread + * applications. + */ + txwin->pid = get_task_pid(current, PIDTYPE_PID); + /* + * Acquire a reference to the task's mm. + */ + txwin->mm = get_task_mm(current); + + if (!txwin->mm) { + put_pid(txwin->pid); + pr_err("VAS: pid(%d): mm_struct is not found\n", + current->pid); + rc = -EPERM; + goto free_window; + } + + mmgrab(txwin->mm); + mmput(txwin->mm); + mm_context_add_copro(txwin->mm); + /* + * Process closes window during exit. In the case of + * multithread application, the child thread can open + * window and can exit without closing it. Expects parent + * thread to use and close the window. So do not need + * to take pid reference for parent thread. 
+ */ + txwin->tgid = find_get_pid(task_tgid_vnr(current)); } set_vinst_win(vinst, txwin); @@ -1257,8 +1290,17 @@ int vas_win_close(struct vas_window *window) poll_window_castout(window); /* if send window, drop reference to matching receive window */ - if (window->tx_win) + if (window->tx_win) { + if (window->user_win) { + /* Drop references to pid and mm */ + put_pid(window->pid); + if (window->mm) { + mm_context_remove_copro(window->mm); + mmdrop(window->mm); + } + } put_rx_win(window->rxwin); + } vas_window_free(window); diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h index 88d084d3bfd9..2a040722dffa 100644 --- a/arch/powerpc/platforms/powernv/vas.h +++ b/arch/powerpc/platforms/powernv/vas.h @@ -355,7 +355,9 @@ struct vas_window { bool user_win; /* True if user space window */ void *hvwc_map; /* HV window context */ void *uwc_map; /* OS/User window context */ - pid_t pid; /* Linux process id of owner */ + struct pid *pid; /* Linux process id of owner */ + struct pid *tgid; /* Thread group ID of owner */ + struct mm_struct *mm; /* Linux process mm_struct */ int wcreds_max; /* Window credits */ char *dbgname; @@ -430,6 +432,11 @@ extern void vas_window_init_dbgdir(struct vas_window *win); extern void vas_window_free_dbgdir(struct vas_window *win); extern int vas_setup_fault_window(struct vas_instance *vinst); +static inline int vas_window_pid(struct vas_window *window) +{ + return pid_vnr(window->pid); +} + static inline void vas_log_write(struct vas_window *win, char *name, void *regptr, u64 val) { -- cgit v1.2.3-59-g8ed1b From 9774628acf86409771acad6269ad24ea31ddb4b3 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Wed, 15 Apr 2020 23:03:02 -0700 Subject: powerpc/vas: Setup thread IRQ handler per VAS instance When NX encounters translation error on CRB and any request buffer, raises an interrupt on the CPU to handle the fault. It can raise one interrupt for multiple faults. Expects OS to handle these faults and return credits for fault window after processing faults. Setup thread IRQ handler and IRQ thread function per each VAS instance. IRQ handler checks if the thread is already woken up and can handle new faults. If so returns with IRQ_HANDLED, otherwise wake up thread to process new faults. The thread functions reads each CRB entry from fault FIFO until sees invalid entry. After reading each CRB, determine the corresponding send window using pswid (from CRB) and process fault CRB. Then invalidate the entry and return credit. Processing fault CRB and return credit is described in subsequent patches. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Haren Myneni Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587016982.2275.1060.camel@hbabu-laptop --- arch/powerpc/platforms/powernv/vas-fault.c | 131 ++++++++++++++++++++++++++++ arch/powerpc/platforms/powernv/vas-window.c | 60 +++++++++++++ arch/powerpc/platforms/powernv/vas.c | 23 ++++- arch/powerpc/platforms/powernv/vas.h | 7 ++ 4 files changed, 220 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c index 404499875525..0da8358b357d 100644 --- a/arch/powerpc/platforms/powernv/vas-fault.c +++ b/arch/powerpc/platforms/powernv/vas-fault.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include "vas.h" @@ -24,6 +25,136 @@ */ #define VAS_FAULT_WIN_FIFO_SIZE (4 << 20) +/* + * Process valid CRBs in fault FIFO. 
+ * NX processes user space requests, returns credits and updates the
+ * status in the CRB. If it encounters a translation error when accessing
+ * the CRB or request buffers, it raises an interrupt on the CPU to
+ * handle the fault. It takes a credit on the fault window, updates
+ * nx_fault_stamp in the CRB with the following information and pastes
+ * the CRB in the fault FIFO:
+ *
+ * pswid - window ID of the window on which the request is sent.
+ * fault_storage_addr - fault address
+ *
+ * It can raise a single interrupt for multiple faults. It expects the OS
+ * to process all valid faults and return a credit for each fault on the
+ * user space and fault windows. This fault FIFO control is done with the
+ * credit mechanism: NX can continuously paste CRBs as long as credits
+ * are available on the fault window; otherwise it returns RMA_Reject.
+ *
+ * Total credits available on fault window: FIFO_SIZE(4MB)/CRB_SIZE(128)
+ *
+ */
+irqreturn_t vas_fault_thread_fn(int irq, void *data)
+{
+	struct vas_instance *vinst = data;
+	struct coprocessor_request_block *crb, *entry;
+	struct coprocessor_request_block buf;
+	struct vas_window *window;
+	unsigned long flags;
+	void *fifo;
+
+	crb = &buf;
+
+	/*
+	 * VAS can interrupt with multiple page faults. So process all
+	 * valid CRBs within the fault FIFO until it reaches an invalid
+	 * CRB. We use CCW[0] and pswid to validate CRBs:
+	 *
+	 * CCW[0]	Reserved bit. When NX pastes a CRB, CCW[0]=0.
+	 *		The OS sets this bit to 1 after reading the CRB.
+	 * pswid	NX assigns the window ID. Set pswid to -1 after
+	 *		reading the CRB from the fault FIFO.
+	 *
+	 * We exit this function if no valid CRBs are available to process.
+	 * So acquire fault_lock and reset fifo_in_progress to 0 before
+	 * exit.
+	 * In case the kernel receives another interrupt for a different
+	 * page fault, the interrupt handler returns IRQ_HANDLED if
+	 * fifo_in_progress is set, meaning these new faults will be
+	 * handled by the current thread. Otherwise it sets
+	 * fifo_in_progress and returns IRQ_WAKE_THREAD to wake up the
+	 * thread.
+	 */
+	while (true) {
+		spin_lock_irqsave(&vinst->fault_lock, flags);
+		/*
+		 * Advance the fault fifo pointer to the next CRB.
+		 * Use CRB_SIZE rather than sizeof(*crb) since the latter is
+		 * aligned to CRB_ALIGN (256) but the CRB written to by VAS is
+		 * only CRB_SIZE in length.
+		 */
+		fifo = vinst->fault_fifo + (vinst->fault_crbs * CRB_SIZE);
+		entry = fifo;
+
+		if ((entry->stamp.nx.pswid == cpu_to_be32(FIFO_INVALID_ENTRY))
+			|| (entry->ccw & cpu_to_be32(CCW0_INVALID))) {
+			vinst->fifo_in_progress = 0;
+			spin_unlock_irqrestore(&vinst->fault_lock, flags);
+			return IRQ_HANDLED;
+		}
+
+		spin_unlock_irqrestore(&vinst->fault_lock, flags);
+		vinst->fault_crbs++;
+		if (vinst->fault_crbs == (vinst->fault_fifo_size / CRB_SIZE))
+			vinst->fault_crbs = 0;
+
+		memcpy(crb, fifo, CRB_SIZE);
+		entry->stamp.nx.pswid = cpu_to_be32(FIFO_INVALID_ENTRY);
+		entry->ccw |= cpu_to_be32(CCW0_INVALID);
+
+		pr_devel("VAS[%d] fault_fifo %p, fifo %p, fault_crbs %d\n",
+				vinst->vas_id, vinst->fault_fifo, fifo,
+				vinst->fault_crbs);
+
+		window = vas_pswid_to_window(vinst,
+				be32_to_cpu(crb->stamp.nx.pswid));
+
+		if (IS_ERR(window)) {
+			/*
+			 * We got an interrupt about a specific send
+			 * window but we can't find that window and we can't
+			 * even clean it up (return the credit on the user
+			 * space window).
+			 * But we should not get here.
+			 * TODO: Disable IRQ.
+			 */
+			pr_err("VAS[%d] fault_fifo %p, fifo %p, pswid 0x%x, fault_crbs %d bad CRB?\n",
+				vinst->vas_id, vinst->fault_fifo, fifo,
+				be32_to_cpu(crb->stamp.nx.pswid),
+				vinst->fault_crbs);
+
+			WARN_ON_ONCE(1);
+		}
+
+	}
+}
+
+irqreturn_t vas_fault_handler(int irq, void *dev_id)
+{
+	struct vas_instance *vinst = dev_id;
+	irqreturn_t ret = IRQ_WAKE_THREAD;
+	unsigned long flags;
+
+	/*
+	 * NX can generate an interrupt for multiple faults. So the
+	 * fault handler thread processes all CRBs until it finds an
+	 * invalid entry. If NX sees continuous faults, it is possible
+	 * that the thread function entered with the first interrupt
+	 * can execute and process all valid CRBs.
+	 * So wake up the thread only if the fault thread is not already
+	 * in progress.
+	 */
+	spin_lock_irqsave(&vinst->fault_lock, flags);
+
+	if (vinst->fifo_in_progress)
+		ret = IRQ_HANDLED;
+	else
+		vinst->fifo_in_progress = 1;
+
+	spin_unlock_irqrestore(&vinst->fault_lock, flags);
+
+	return ret;
+}
+
 /*
  * Fault window is opened per VAS instance. NX pastes fault CRB in fault
  * FIFO upon page faults.
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index 063cda2b540f..f12f7eb1a5a6 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -1049,6 +1049,15 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
 			goto free_window;
 		}
 	} else {
+		/*
+		 * Interrupt handler or fault window setup failed. It means
+		 * the kernel can not handle NX page faults, so do not open
+		 * a user space send window.
+		 */
+		if (!vinst->virq) {
+			rc = -ENODEV;
+			goto free_window;
+		}
 		/*
 		 * A user mapping must ensure that context switch issues
 		 * CP_ABORT for this thread.
@@ -1307,3 +1316,54 @@ int vas_win_close(struct vas_window *window)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(vas_win_close);
+
+struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
+		uint32_t pswid)
+{
+	struct vas_window *window;
+	int winid;
+
+	if (!pswid) {
+		pr_devel("%s: called for pswid 0!\n", __func__);
+		return ERR_PTR(-ESRCH);
+	}
+
+	decode_pswid(pswid, NULL, &winid);
+
+	if (winid >= VAS_WINDOWS_PER_CHIP)
+		return ERR_PTR(-ESRCH);
+
+	/*
+	 * If the application closes the window before the hardware
+	 * returns the fault CRB, we should wait in vas_win_close()
+	 * for the pending requests. So the window must be active
+	 * and the process alive.
+	 *
+	 * If it is a kernel process, we should not get any faults and
+	 * should not get here.
+	 */
+	window = vinst->windows[winid];
+
+	if (!window) {
+		pr_err("PSWID decode: Could not find window for winid %d pswid %d vinst 0x%p\n",
+			winid, pswid, vinst);
+		return NULL;
+	}
+
+	/*
+	 * Do some sanity checks on the decoded window. The window should
+	 * be an NX GZIP user send window. FTW windows should not incur
+	 * faults since their CRBs are ignored (not queued on the FIFO or
+	 * processed by NX).
+	 */
+	if (!window->tx_win || !window->user_win || !window->nx_win ||
+			window->cop == VAS_COP_TYPE_FAULT ||
+			window->cop == VAS_COP_TYPE_FTW) {
+		pr_err("PSWID decode: id %d, tx %d, user %d, nx %d, cop %d\n",
+			winid, window->tx_win, window->user_win,
+			window->nx_win, window->cop);
+		WARN_ON(1);
+	}
+
+	return window;
+}
diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c
index 9013a6344aec..598e4cd563fb 100644
--- a/arch/powerpc/platforms/powernv/vas.c
+++ b/arch/powerpc/platforms/powernv/vas.c
@@ -14,6 +14,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 
@@ -26,7 +28,25 @@ static DEFINE_PER_CPU(int, cpu_vas_id);
 
 static int vas_irq_fault_window_setup(struct vas_instance *vinst)
 {
-	return vas_setup_fault_window(vinst);
+	char devname[64];
+	int rc = 0;
+
+	snprintf(devname, sizeof(devname), "vas-%d", vinst->vas_id);
+	rc = request_threaded_irq(vinst->virq, vas_fault_handler,
+				vas_fault_thread_fn, 0, devname, vinst);
+
+	if (rc) {
+		pr_err("VAS[%d]: Request IRQ(%d) failed with %d\n",
+				vinst->vas_id, vinst->virq, rc);
+		goto out;
+	}
+
+	rc = vas_setup_fault_window(vinst);
+	if (rc)
+		free_irq(vinst->virq, vinst);
+
+out:
+	return rc;
 }
 
 static int init_vas_instance(struct platform_device *pdev)
@@ -119,6 +139,7 @@ static int init_vas_instance(struct platform_device *pdev)
 	list_add(&vinst->node, &vas_instances);
 	mutex_unlock(&vas_mutex);
 
+	spin_lock_init(&vinst->fault_lock);
 	/*
 	 * IRQ and fault handling setup is needed only for user space
 	 * send windows.
diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
index 2a040722dffa..cd165c8b2b57 100644
--- a/arch/powerpc/platforms/powernv/vas.h
+++ b/arch/powerpc/platforms/powernv/vas.h
@@ -331,7 +331,10 @@ struct vas_instance {
 	u64 irq_port;
 	int virq;
+	int fault_crbs;
 	int fault_fifo_size;
+	int fifo_in_progress;	/* To wake up thread or return IRQ_HANDLED */
+	spinlock_t fault_lock;	/* Protects fifo_in_progress update */
 	void *fault_fifo;
 	struct vas_window *fault_win; /* Fault window */
 
@@ -431,6 +434,10 @@ extern void vas_instance_init_dbgdir(struct vas_instance *vinst);
 extern void vas_window_init_dbgdir(struct vas_window *win);
 extern void vas_window_free_dbgdir(struct vas_window *win);
 extern int vas_setup_fault_window(struct vas_instance *vinst);
+extern irqreturn_t vas_fault_thread_fn(int irq, void *data);
+extern irqreturn_t vas_fault_handler(int irq, void *dev_id);
+extern struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
+		uint32_t pswid);
 
 static inline int vas_window_pid(struct vas_window *window)
 {
-- cgit v1.2.3-59-g8ed1b


From c96c4436aba4c12f1f48369f2f90bc43e12fe36c Mon Sep 17 00:00:00 2001
From: Haren Myneni
Date: Wed, 15 Apr 2020 23:03:42 -0700
Subject: powerpc/vas: Update CSB and notify process for fault CRBs

Applications poll on the CSB for a status update after a request is
issued. NX processes these requests and updates the CSB with the status.
If it encounters a translation error, it pastes the CRB in the fault
FIFO and raises an interrupt. The kernel handles the fault by reading
the CRB from the fault FIFO and processing the fault CRB. For each fault
CRB, update the fault address in the CRB (fault_storage_addr) and the
translation error status in the CSB so that user space can touch the
fault address and resend the request. If user space passed an invalid
CSB address, send a SIGSEGV signal to the process. In multi-thread
applications, the child thread may not be available, so if the task is
not running, send the signal to the tgid.
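From the user space side, this SIGSEGV is observable with an ordinary
SA_SIGINFO handler; a minimal sketch (illustration only, not part of this
patch):

	#include <signal.h>
	#include <unistd.h>

	/* si_addr carries the bad csb_addr that was passed in the CRB. */
	static void segv_handler(int sig, siginfo_t *info, void *ctx)
	{
		if (info->si_code == SEGV_MAPERR) {
			static const char msg[] = "bad CSB address\n";

			/* write() is async-signal-safe, unlike printf() */
			write(STDERR_FILENO, msg, sizeof(msg) - 1);
		}
		_exit(1);
	}

	static void install_segv_handler(void)
	{
		struct sigaction sa = { 0 };

		sa.sa_sigaction = segv_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGSEGV, &sa, NULL);
	}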
Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Haren Myneni Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587017022.2275.1063.camel@hbabu-laptop --- arch/powerpc/platforms/powernv/vas-fault.c | 126 ++++++++++++++++++++++++++++- 1 file changed, 125 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c index 0da8358b357d..354577dcf2dc 100644 --- a/arch/powerpc/platforms/powernv/vas-fault.c +++ b/arch/powerpc/platforms/powernv/vas-fault.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -25,6 +26,128 @@ */ #define VAS_FAULT_WIN_FIFO_SIZE (4 << 20) +/* + * Update the CSB to indicate a translation error. + * + * User space will be polling on CSB after the request is issued. + * If NX can handle the request without any issues, it updates CSB. + * Whereas if NX encounters page fault, the kernel will handle the + * fault and update CSB with translation error. + * + * If we are unable to update the CSB means copy_to_user failed due to + * invalid csb_addr, send a signal to the process. + */ +static void update_csb(struct vas_window *window, + struct coprocessor_request_block *crb) +{ + struct coprocessor_status_block csb; + struct kernel_siginfo info; + struct task_struct *tsk; + void __user *csb_addr; + struct pid *pid; + int rc; + + /* + * NX user space windows can not be opened for task->mm=NULL + * and faults will not be generated for kernel requests. + */ + if (WARN_ON_ONCE(!window->mm || !window->user_win)) + return; + + csb_addr = (void __user *)be64_to_cpu(crb->csb_addr); + + memset(&csb, 0, sizeof(csb)); + csb.cc = CSB_CC_TRANSLATION; + csb.ce = CSB_CE_TERMINATION; + csb.cs = 0; + csb.count = 0; + + /* + * NX operates and returns in BE format as defined CRB struct. + * So saves fault_storage_addr in BE as NX pastes in FIFO and + * expects user space to convert to CPU format. + */ + csb.address = crb->stamp.nx.fault_storage_addr; + csb.flags = 0; + + pid = window->pid; + tsk = get_pid_task(pid, PIDTYPE_PID); + /* + * Process closes send window after all pending NX requests are + * completed. In multi-thread applications, a child thread can + * open a window and can exit without closing it. May be some + * requests are pending or this window can be used by other + * threads later. We should handle faults if NX encounters + * pages faults on these requests. Update CSB with translation + * error and fault address. If csb_addr passed by user space is + * invalid, send SEGV signal to pid saved in window. If the + * child thread is not running, send the signal to tgid. + * Parent thread (tgid) will close this window upon its exit. + * + * pid and mm references are taken when window is opened by + * process (pid). So tgid is used only when child thread opens + * a window and exits without closing it. + */ + if (!tsk) { + pid = window->tgid; + tsk = get_pid_task(pid, PIDTYPE_PID); + /* + * Parent thread (tgid) will be closing window when it + * exits. So should not get here. + */ + if (WARN_ON_ONCE(!tsk)) + return; + } + + /* Return if the task is exiting. */ + if (tsk->flags & PF_EXITING) { + put_task_struct(tsk); + return; + } + + use_mm(window->mm); + rc = copy_to_user(csb_addr, &csb, sizeof(csb)); + /* + * User space polls on csb.flags (first byte). So add barrier + * then copy first byte with csb flags update. 
+	 */
+	if (!rc) {
+		csb.flags = CSB_V;
+		/* Make sure the update to csb.flags is visible now */
+		smp_mb();
+		rc = copy_to_user(csb_addr, &csb, sizeof(u8));
+	}
+	unuse_mm(window->mm);
+	put_task_struct(tsk);
+
+	/* Success */
+	if (!rc)
+		return;
+
+	pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
+			csb_addr, pid_vnr(pid));
+
+	clear_siginfo(&info);
+	info.si_signo = SIGSEGV;
+	info.si_errno = EFAULT;
+	info.si_code = SEGV_MAPERR;
+	info.si_addr = csb_addr;
+
+	/*
+	 * The process will be polling on csb.flags after the request is
+	 * sent to NX, so the CSB update should generally not fail except
+	 * when an application passes an invalid csb_addr. So an error
+	 * message is displayed and it is left to user space whether to
+	 * ignore or handle this signal.
+	 */
+	rcu_read_lock();
+	rc = kill_pid_info(SIGSEGV, &info, pid);
+	rcu_read_unlock();
+
+	pr_devel("%s(): pid %d kill_pid_info() rc %d\n", __func__,
+			pid_vnr(pid), rc);
+}
+
 /*
  * Process valid CRBs in fault FIFO.
  * NX processes user space requests, returns credits and updates the
@@ -124,8 +247,9 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
 				vinst->fault_crbs);
 
 			WARN_ON_ONCE(1);
+		} else {
+			update_csb(window, crb);
 		}
-
 	}
 }
-- cgit v1.2.3-59-g8ed1b


From 461862ef94a88e4b74e177b82bee2c81c6535fae Mon Sep 17 00:00:00 2001
From: Haren Myneni
Date: Wed, 15 Apr 2020 23:04:19 -0700
Subject: powerpc/vas: Return credits after handling fault

NX uses a credit mechanism to control the number of requests that can be
issued on a specific window at any point in time. Only send windows and
the fault window use credits. When a request is issued on a given window,
a credit is taken. This credit is returned after that request is
processed. If no credits are available, NX returns RMA_Busy for a send
window and RMA_Reject for the fault window.

NX expects the OS to return the credit for the send window after
processing the fault CRB, and the credit for the fault window after
handling the fault.

Signed-off-by: Sukadev Bhattiprolu
Signed-off-by: Haren Myneni
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/1587017059.2275.1064.camel@hbabu-laptop
---
 arch/powerpc/platforms/powernv/vas-fault.c  |  9 ++++++++
 arch/powerpc/platforms/powernv/vas-window.c | 36 +++++++++++++++++++++++++++++
 arch/powerpc/platforms/powernv/vas.h        |  1 +
 3 files changed, 46 insertions(+)

diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index 354577dcf2dc..b6bec64d793e 100644
--- a/arch/powerpc/platforms/powernv/vas-fault.c
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -224,6 +224,10 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
 		memcpy(crb, fifo, CRB_SIZE);
 		entry->stamp.nx.pswid = cpu_to_be32(FIFO_INVALID_ENTRY);
 		entry->ccw |= cpu_to_be32(CCW0_INVALID);
+		/*
+		 * Return the credit for the fault window.
+		 */
+		vas_return_credit(vinst->fault_win, false);
 
 		pr_devel("VAS[%d] fault_fifo %p, fifo %p, fault_crbs %d\n",
 				vinst->vas_id, vinst->fault_fifo, fifo,
@@ -249,6 +253,11 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
 			WARN_ON_ONCE(1);
 		} else {
 			update_csb(window, crb);
+			/*
+			 * Return the credit for the send window after
+			 * processing the fault CRB.
+ */ + vas_return_credit(window, true); } } } diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index f12f7eb1a5a6..3ef71209ff58 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -1317,6 +1317,42 @@ int vas_win_close(struct vas_window *window) } EXPORT_SYMBOL_GPL(vas_win_close); +/* + * Return credit for the given window. + * Send windows and fault window uses credit mechanism as follows: + * + * Send windows: + * - The default number of credits available for each send window is + * 1024. It means 1024 requests can be issued asynchronously at the + * same time. If the credit is not available, that request will be + * returned with RMA_Busy. + * - One credit is taken when NX request is issued. + * - This credit is returned after NX processed that request. + * - If NX encounters translation error, kernel will return the + * credit on the specific send window after processing the fault CRB. + * + * Fault window: + * - The total number credits available is FIFO_SIZE/CRB_SIZE. + * Means 4MB/128 in the current implementation. If credit is not + * available, RMA_Reject is returned. + * - A credit is taken when NX pastes CRB in fault FIFO. + * - The kernel with return credit on fault window after reading entry + * from fault FIFO. + */ +void vas_return_credit(struct vas_window *window, bool tx) +{ + uint64_t val; + + val = 0ULL; + if (tx) { /* send window */ + val = SET_FIELD(VAS_TX_WCRED, val, 1); + write_hvwc_reg(window, VREG(TX_WCRED_ADDER), val); + } else { + val = SET_FIELD(VAS_LRX_WCRED, val, 1); + write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), val); + } +} + struct vas_window *vas_pswid_to_window(struct vas_instance *vinst, uint32_t pswid) { diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h index cd165c8b2b57..60bdda663750 100644 --- a/arch/powerpc/platforms/powernv/vas.h +++ b/arch/powerpc/platforms/powernv/vas.h @@ -436,6 +436,7 @@ extern void vas_window_free_dbgdir(struct vas_window *win); extern int vas_setup_fault_window(struct vas_instance *vinst); extern irqreturn_t vas_fault_thread_fn(int irq, void *data); extern irqreturn_t vas_fault_handler(int irq, void *dev_id); +extern void vas_return_credit(struct vas_window *window, bool tx); extern struct vas_window *vas_pswid_to_window(struct vas_instance *vinst, uint32_t pswid); -- cgit v1.2.3-59-g8ed1b From cf33e1e9383e1a09f6f3e308ebfbbe1871a19ff8 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Wed, 15 Apr 2020 23:04:59 -0700 Subject: powerpc/vas: Print CRB and FIFO values Dump FIFO entries if could not find send window and print CRB for debugging. 
Signed-off-by: Sukadev Bhattiprolu
Signed-off-by: Haren Myneni
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/1587017099.2275.1067.camel@hbabu-laptop
---
 arch/powerpc/platforms/powernv/vas-fault.c | 41 ++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index b6bec64d793e..25db70be4c9c 100644
--- a/arch/powerpc/platforms/powernv/vas-fault.c
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -26,6 +26,28 @@
  */
 #define VAS_FAULT_WIN_FIFO_SIZE	(4 << 20)
 
+static void dump_crb(struct coprocessor_request_block *crb)
+{
+	struct data_descriptor_entry *dde;
+	struct nx_fault_stamp *nx;
+
+	dde = &crb->source;
+	pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
+		be64_to_cpu(dde->address), be32_to_cpu(dde->length),
+		dde->count, dde->index, dde->flags);
+
+	dde = &crb->target;
+	pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
+		be64_to_cpu(dde->address), be32_to_cpu(dde->length),
+		dde->count, dde->index, dde->flags);
+
+	nx = &crb->stamp.nx;
+	pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
+		be32_to_cpu(nx->pswid),
+		be64_to_cpu(crb->stamp.nx.fault_storage_addr),
+		nx->flags, nx->fault_status);
+}
+
 /*
  * Update the CSB to indicate a translation error.
  *
@@ -148,6 +170,23 @@ static void update_csb(struct vas_window *window,
 			pid_vnr(pid), rc);
 }
 
+static void dump_fifo(struct vas_instance *vinst, void *entry)
+{
+	unsigned long *end = vinst->fault_fifo + vinst->fault_fifo_size;
+	unsigned long *fifo = entry;
+	int i;
+
+	pr_err("Fault fifo size %d, Max crbs %d\n", vinst->fault_fifo_size,
+			vinst->fault_fifo_size / CRB_SIZE);
+
+	/* Dump up to 10 CRB entries or until the end of the FIFO */
+	pr_err("Fault FIFO Dump:\n");
+	for (i = 0; i < 10*(CRB_SIZE/8) && fifo < end; i += 4, fifo += 4) {
+		pr_err("[%.3d, %p]: 0x%.16lx 0x%.16lx 0x%.16lx 0x%.16lx\n",
+			i, fifo, *fifo, *(fifo+1), *(fifo+2), *(fifo+3));
+	}
+}
+
 /*
  * Process valid CRBs in fault FIFO.
  * NX processes user space requests, returns credits and updates the
@@ -233,6 +272,7 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
 				vinst->vas_id, vinst->fault_fifo, fifo,
 				vinst->fault_crbs);
 
+		dump_crb(crb);
 		window = vas_pswid_to_window(vinst,
 				be32_to_cpu(crb->stamp.nx.pswid));
 
@@ -245,6 +285,7 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
 			 * But we should not get here.
 			 * TODO: Disable IRQ.
 			 */
+			dump_fifo(vinst, (void *)entry);
 			pr_err("VAS[%d] fault_fifo %p, fifo %p, pswid 0x%x, fault_crbs %d bad CRB?\n",
 				vinst->vas_id, vinst->fault_fifo, fifo,
 				be32_to_cpu(crb->stamp.nx.pswid),
 				vinst->fault_crbs);
-- cgit v1.2.3-59-g8ed1b


From 04f6296ca7c34cbc78e077518c634cc5c392b3ec Mon Sep 17 00:00:00 2001
From: Haren Myneni
Date: Wed, 15 Apr 2020 23:05:36 -0700
Subject: powerpc/vas: Do not use default credits for receive window

The system checkstops if the RxFIFO overruns with more requests than the
maximum possible number of CRBs allowed in the FIFO at any time. So the
max credits value (rxattr.wcreds_max) is now set and passed to
vas_rx_win_open() by the driver.
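After this change, a driver must size wcreds_max explicitly before opening
an NX receive window; in outline (names from this series; rx_fifo and
fifo_size are caller-provided):

	struct vas_rx_win_attr rxattr;
	struct vas_window *rxwin;

	vas_init_rx_win_attr(&rxattr, VAS_COP_TYPE_GZIP);
	rxattr.rx_fifo = rx_fifo;
	rxattr.rx_fifo_size = fifo_size;
	/* one credit per CRB slot that the receive FIFO can hold */
	rxattr.wcreds_max = fifo_size / CRB_SIZE;

	rxwin = vas_rx_win_open(vasid, VAS_COP_TYPE_GZIP, &rxattr);
	if (IS_ERR(rxwin))
		return PTR_ERR(rxwin);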
Signed-off-by: Haren Myneni Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587017136.2275.1070.camel@hbabu-laptop --- arch/powerpc/platforms/powernv/vas-window.c | 4 ++-- arch/powerpc/platforms/powernv/vas.h | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index 3ef71209ff58..4b5adf521882 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -772,7 +772,7 @@ static bool rx_win_args_valid(enum vas_cop_type cop, if (attr->rx_fifo_size > VAS_RX_FIFO_SIZE_MAX) return false; - if (attr->wcreds_max > VAS_RX_WCREDS_MAX) + if (!attr->wcreds_max) return false; if (attr->nx_win) { @@ -877,7 +877,7 @@ struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop, rxwin->nx_win = rxattr->nx_win; rxwin->user_win = rxattr->user_win; rxwin->cop = cop; - rxwin->wcreds_max = rxattr->wcreds_max ?: VAS_WCREDS_DEFAULT; + rxwin->wcreds_max = rxattr->wcreds_max; init_winctx_for_rxwin(rxwin, rxattr, &winctx); init_winctx_regs(rxwin, &winctx); diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h index 60bdda663750..a7143b16232f 100644 --- a/arch/powerpc/platforms/powernv/vas.h +++ b/arch/powerpc/platforms/powernv/vas.h @@ -101,11 +101,9 @@ /* * Initial per-process credits. * Max send window credits: 4K-1 (12-bits in VAS_TX_WCRED) - * Max receive window credits: 64K-1 (16 bits in VAS_LRX_WCRED) * * TODO: Needs tuning for per-process credits */ -#define VAS_RX_WCREDS_MAX ((64 << 10) - 1) #define VAS_TX_WCREDS_MAX ((4 << 10) - 1) #define VAS_WCREDS_DEFAULT (1 << 10) -- cgit v1.2.3-59-g8ed1b From bd4da68dbd5fd2e83841b3500ac575967a948cfa Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Wed, 15 Apr 2020 23:06:59 -0700 Subject: powerpc/vas: Display process stuck message Process can not close send window until all requests are processed. Means wait until window state is not busy and send credits are returned. Display debug messages in case taking longer to close the window. Signed-off-by: Haren Myneni Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587017219.2275.1073.camel@hbabu-laptop --- arch/powerpc/platforms/powernv/vas-window.c | 30 ++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index 4b5adf521882..d0c07cf03f68 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -1181,6 +1181,7 @@ static void poll_window_credits(struct vas_window *window) { u64 val; int creds, mode; + int count = 0; val = read_hvwc_reg(window, VREG(WINCTL)); if (window->tx_win) @@ -1199,10 +1200,27 @@ retry: creds = GET_FIELD(VAS_LRX_WCRED, val); } + /* + * Takes around few milliseconds to complete all pending requests + * and return credits. + * TODO: Scan fault FIFO and invalidate CRBs points to this window + * and issue CRB Kill to stop all pending requests. Need only + * if there is a bug in NX or fault handling in kernel. + */ if (creds < window->wcreds_max) { val = 0; set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(10)); + count++; + /* + * Process can not close send window until all credits are + * returned. + */ + if (!(count % 1000)) + pr_warn_ratelimited("VAS: pid %d stuck. Waiting for credits returned for Window(%d). 
creds %d, Retries %d\n",
+				vas_window_pid(window), window->winid,
+				creds, count);
+
 		goto retry;
 	}
 }
@@ -1216,6 +1234,7 @@ static void poll_window_busy_state(struct vas_window *window)
 {
 	int busy;
 	u64 val;
+	int count = 0;
 
 retry:
 	val = read_hvwc_reg(window, VREG(WIN_STATUS));
@@ -1223,7 +1242,16 @@ retry:
 	if (busy) {
 		val = 0;
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(msecs_to_jiffies(5));
+		schedule_timeout(msecs_to_jiffies(10));
+		count++;
+		/*
+		 * It takes a few milliseconds to process all pending
+		 * requests.
+		 */
+		if (!(count % 1000))
+			pr_warn_ratelimited("VAS: pid %d stuck. Window (ID=%d) is in busy state. Retries %d\n",
+				vas_window_pid(window), window->winid, count);
+
 		goto retry;
 	}
 }
-- cgit v1.2.3-59-g8ed1b


From 1d955f981895464d8f112179a20bfdb92e6c63d4 Mon Sep 17 00:00:00 2001
From: Haren Myneni
Date: Wed, 15 Apr 2020 23:07:36 -0700
Subject: powerpc/vas: Free send window in VAS instance after credits returned

NX may still be processing requests while the send window is being
closed. Wait until all credits are returned and only then free the send
window from the VAS instance.

Signed-off-by: Haren Myneni
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/1587017256.2275.1076.camel@hbabu-laptop
---
 arch/powerpc/platforms/powernv/vas-window.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index d0c07cf03f68..e15b40596746 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -1316,14 +1316,14 @@ int vas_win_close(struct vas_window *window)
 
 	unmap_paste_region(window);
 
-	clear_vinst_win(window);
-
 	poll_window_busy_state(window);
 
 	unpin_close_window(window);
 
 	poll_window_credits(window);
 
+	clear_vinst_win(window);
+
 	poll_window_castout(window);
 
 	/* if send window, drop reference to matching receive window */
-- cgit v1.2.3-59-g8ed1b


From c420644c0a8f8839ca7269acbb8a3fc7fe1ec97d Mon Sep 17 00:00:00 2001
From: Haren Myneni
Date: Wed, 15 Apr 2020 23:08:11 -0700
Subject: powerpc: Use mm_context vas_windows counter to issue CP_ABORT

set_thread_uses_vas() sets the used_vas flag for a process that opened a
VAS window, and CP_ABORT is issued during context switch only for that
process. In a multi-thread application, windows can be shared: for
example, thread A can open a window and thread B can run COPY/PASTE
instructions to send an NX request, which may cause corruption, snooping
or a covert channel. Also, once this flag is set, CP_ABORT continues to
be issued even after the VAS window is closed.

So define a vas_windows counter in the process mm_context, increment
this counter for each window open and decrement it on window close. If
vas_windows is non-zero, issue CP_ABORT during context switch. This
means the foreign real address mapping is cleared only if the process /
thread actually uses COPY/PASTE, and it is disabled for that process
once no windows are open.

Moved the set_thread_uses_vas() code to vas_tx_win_open() as this
functionality is needed only for user space windows. We are adding VAS
user space support along with this fix, so there is no need to include
this fix in stable releases.
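The resulting lifecycle can be summarised in a short sketch (all names
from the diff below; error paths omitted):

	/* vas_tx_win_open(), user space window path */
	mm_context_add_vas_window(txwin->mm);	/* vas_windows++, add copro */
	asm volatile(PPC_CP_ABORT);		/* clear any pending COPY */

	/* __switch_to() */
	if (current->mm && atomic_read(&current->mm->context.vas_windows))
		asm volatile(PPC_CP_ABORT);

	/* vas_win_close(), user space window path */
	mm_context_remove_vas_window(window->mm);	/* vas_windows-- */

Once the last window is closed the counter drops to zero and later
context switches stop paying for CP_ABORT.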
Fixes: 9d2a4d71332c ("powerpc: Define set_thread_uses_vas()") Signed-off-by: Haren Myneni Reported-by: Nicholas Piggin Suggested-by: Milton Miller Suggested-by: Nicholas Piggin Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587017291.2275.1077.camel@hbabu-laptop --- arch/powerpc/include/asm/book3s/64/mmu.h | 3 +++ arch/powerpc/include/asm/mmu_context.h | 30 +++++++++++++++++++++++++++++ arch/powerpc/include/asm/processor.h | 1 - arch/powerpc/include/asm/switch_to.h | 2 -- arch/powerpc/kernel/process.c | 24 ++--------------------- arch/powerpc/platforms/powernv/vas-window.c | 22 ++++++++++++--------- 6 files changed, 48 insertions(+), 34 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index bb3deb76c951..f0a9ff690881 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -116,6 +116,9 @@ typedef struct { /* Number of users of the external (Nest) MMU */ atomic_t copros; + /* Number of user space windows opened in process mm_context */ + atomic_t vas_windows; + struct hash_mm_context *hash_context; unsigned long vdso_base; diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 360367c579de..1a474f6b1992 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -185,11 +185,41 @@ static inline void mm_context_remove_copro(struct mm_struct *mm) dec_mm_active_cpus(mm); } } + +/* + * vas_windows counter shows number of open windows in the mm + * context. During context switch, use this counter to clear the + * foreign real address mapping (CP_ABORT) for the thread / process + * that intend to use COPY/PASTE. When a process closes all windows, + * disable CP_ABORT which is expensive to run. + * + * For user context, register a copro so that TLBIs are seen by the + * nest MMU. mm_context_add/remove_vas_window() are used only for user + * space windows. 
+ */ +static inline void mm_context_add_vas_window(struct mm_struct *mm) +{ + atomic_inc(&mm->context.vas_windows); + mm_context_add_copro(mm); +} + +static inline void mm_context_remove_vas_window(struct mm_struct *mm) +{ + int v; + + mm_context_remove_copro(mm); + v = atomic_dec_if_positive(&mm->context.vas_windows); + + /* Detect imbalance between add and remove */ + WARN_ON(v < 0); +} #else static inline void inc_mm_active_cpus(struct mm_struct *mm) { } static inline void dec_mm_active_cpus(struct mm_struct *mm) { } static inline void mm_context_add_copro(struct mm_struct *mm) { } static inline void mm_context_remove_copro(struct mm_struct *mm) { } +static inline void mm_context_add_vas_windows(struct mm_struct *mm) { } +static inline void mm_context_remove_vas_windows(struct mm_struct *mm) { } #endif diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index eedcbfb9a6ff..bfa336fbcfeb 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -272,7 +272,6 @@ struct thread_struct { unsigned mmcr0; unsigned used_ebb; - unsigned int used_vas; #endif }; diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index b867b58b1093..fdab93428372 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -102,8 +102,6 @@ static inline void clear_task_ebb(struct task_struct *t) #endif } -extern int set_thread_uses_vas(void); - extern int set_thread_tidr(struct task_struct *t); #endif /* _ASM_POWERPC_SWITCH_TO_H */ diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 9c21288f8645..8479c762aef2 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1228,7 +1228,8 @@ struct task_struct *__switch_to(struct task_struct *prev, * mappings, we must issue a cp_abort to clear any state and * prevent snooping, corruption or a covert channel. */ - if (current->thread.used_vas) + if (current->mm && + atomic_read(¤t->mm->context.vas_windows)) asm volatile(PPC_CP_ABORT); } #endif /* CONFIG_PPC_BOOK3S_64 */ @@ -1467,27 +1468,6 @@ void arch_setup_new_exec(void) } #endif -int set_thread_uses_vas(void) -{ -#ifdef CONFIG_PPC_BOOK3S_64 - if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -EINVAL; - - current->thread.used_vas = 1; - - /* - * Even a process that has no foreign real address mapping can use - * an unpaired COPY instruction (to no real effect). Issue CP_ABORT - * to clear any pending COPY and prevent a covert channel. - * - * __switch_to() will issue CP_ABORT on future context switches. - */ - asm volatile(PPC_CP_ABORT); - -#endif /* CONFIG_PPC_BOOK3S_64 */ - return 0; -} - #ifdef CONFIG_PPC64 /** * Assign a TIDR (thread ID) for task @t and set it in the thread diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index e15b40596746..d62787f502c9 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -1058,13 +1058,6 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, rc = -ENODEV; goto free_window; } - /* - * A user mapping must ensure that context switch issues - * CP_ABORT for this thread. 
- */ - rc = set_thread_uses_vas(); - if (rc) - goto free_window; /* * Window opened by a child thread may not be closed when @@ -1090,7 +1083,7 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, mmgrab(txwin->mm); mmput(txwin->mm); - mm_context_add_copro(txwin->mm); + mm_context_add_vas_window(txwin->mm); /* * Process closes window during exit. In the case of * multithread application, the child thread can open @@ -1099,6 +1092,17 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, * to take pid reference for parent thread. */ txwin->tgid = find_get_pid(task_tgid_vnr(current)); + /* + * Even a process that has no foreign real address mapping can + * use an unpaired COPY instruction (to no real effect). Issue + * CP_ABORT to clear any pending COPY and prevent a covert + * channel. + * + * __switch_to() will issue CP_ABORT on future context switches + * if process / thread has any open VAS window (Use + * current->mm->context.vas_windows). + */ + asm volatile(PPC_CP_ABORT); } set_vinst_win(vinst, txwin); @@ -1332,7 +1336,7 @@ int vas_win_close(struct vas_window *window) /* Drop references to pid and mm */ put_pid(window->pid); if (window->mm) { - mm_context_remove_copro(window->mm); + mm_context_remove_vas_window(window->mm); mmdrop(window->mm); } } -- cgit v1.2.3-59-g8ed1b From a8c0c69b5e95e8f155480d5203a7bafb8024fd93 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Fri, 17 Apr 2020 02:00:29 -0700 Subject: powerpc/vas: Initialize window attributes for GZIP coprocessor type Initialize send and receive window attributes for GZIP high and normal priority types. Signed-off-by: Haren Myneni Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587114029.2275.1103.camel@hbabu-laptop --- arch/powerpc/platforms/powernv/vas-window.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index d62787f502c9..52844a19ca65 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -817,7 +817,8 @@ void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop) { memset(rxattr, 0, sizeof(*rxattr)); - if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) { + if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI || + cop == VAS_COP_TYPE_GZIP || cop == VAS_COP_TYPE_GZIP_HIPRI) { rxattr->pin_win = true; rxattr->nx_win = true; rxattr->fault_win = false; @@ -892,7 +893,8 @@ void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop) { memset(txattr, 0, sizeof(*txattr)); - if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) { + if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI || + cop == VAS_COP_TYPE_GZIP || cop == VAS_COP_TYPE_GZIP_HIPRI) { txattr->rej_no_credit = false; txattr->rx_wcred_mode = true; txattr->tx_wcred_mode = true; @@ -976,9 +978,14 @@ static bool tx_win_args_valid(enum vas_cop_type cop, if (attr->wcreds_max > VAS_TX_WCREDS_MAX) return false; - if (attr->user_win && - (cop != VAS_COP_TYPE_FTW || attr->rsvd_txbuf_count)) - return false; + if (attr->user_win) { + if (attr->rsvd_txbuf_count) + return false; + + if (cop != VAS_COP_TYPE_FTW && cop != VAS_COP_TYPE_GZIP && + cop != VAS_COP_TYPE_GZIP_HIPRI) + return false; + } return true; } -- cgit v1.2.3-59-g8ed1b From 45f25a79fe50f330b563d012fc856a1103cb00d8 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Fri, 17 Apr 2020 02:01:05 -0700 Subject: 
powerpc/vas: Define VAS_TX_WIN_OPEN ioctl API Define the VAS_TX_WIN_OPEN ioctl interface for NX GZIP access from user space. This interface is used to open a GZIP send window and mmap the paste region, which user space can use to send requests directly to NX with copy/paste instructions. Signed-off-by: Haren Myneni Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587114065.2275.1106.camel@hbabu-laptop --- Documentation/userspace-api/ioctl/ioctl-number.rst | 1 + arch/powerpc/include/uapi/asm/vas-api.h | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100644 arch/powerpc/include/uapi/asm/vas-api.h diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index f759edafd938..f18accb1bcd8 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -286,6 +286,7 @@ Code Seq# Include File Comments 'v' 00-1F linux/fs.h conflict! 'v' 00-0F linux/sonypi.h conflict! 'v' 00-0F media/v4l2-subdev.h conflict! +'v' 20-27 arch/powerpc/include/uapi/asm/vas-api.h VAS API 'v' C0-FF linux/meye.h conflict! 'w' all CERN SCI driver 'y' 00-1F packet based user level communications diff --git a/arch/powerpc/include/uapi/asm/vas-api.h b/arch/powerpc/include/uapi/asm/vas-api.h new file mode 100644 index 000000000000..fe95d67e3bab --- /dev/null +++ b/arch/powerpc/include/uapi/asm/vas-api.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Copyright 2019 IBM Corp. + */ + +#ifndef _UAPI_MISC_VAS_H +#define _UAPI_MISC_VAS_H + +#include + +#define VAS_MAGIC 'v' +#define VAS_TX_WIN_OPEN _IOW(VAS_MAGIC, 0x20, struct vas_tx_win_open_attr) + +struct vas_tx_win_open_attr { + __u32 version; + __s16 vas_id; /* specific instance of vas or -1 for default */ + __u16 reserved1; + __u64 flags; /* Future use */ + __u64 reserved2[6]; +}; + +#endif /* _UAPI_MISC_VAS_H */ -- cgit v1.2.3-59-g8ed1b From dda44eb29c235735a5ceae283dc521cfca27885c Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Fri, 17 Apr 2020 02:02:01 -0700 Subject: powerpc/vas: Add VAS user space API On power9, user space can send GZIP compression requests directly to NX once the kernel establishes the NX channel / window with VAS. This patch provides a user space API that allows user space to establish a channel using open, VAS_TX_WIN_OPEN ioctl, mmap and close operations. Each window corresponds to a file descriptor and an application can open multiple windows. After opening the file descriptor, the application issues the VAS_TX_WIN_OPEN ioctl to open a window on a specific VAS instance, then uses the mmap() system call to map the hardware address of the engine's request queue into its virtual address space. The application can then submit one or more requests to the engine by using the copy/paste instructions and pasting the CRBs to the virtual address (aka paste_address) returned by mmap(). Only the NX GZIP coprocessor type is supported right now, and GZIP engine access is allowed via the /dev/crypto/nx-gzip device node. Thanks to Michael Ellerman for his changes and suggestions to make the ioctl generic to support any coprocessor type.
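For illustration, a minimal user-space sketch of the open / ioctl / mmap flow described above, under the assumption that the uapi header is installed as <asm/vas-api.h>. The actual request submission (building a CRB and copy/pasting it to the mapping) is elided, since that depends on the NX hardware documentation.

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <asm/vas-api.h>  /* VAS_TX_WIN_OPEN, struct vas_tx_win_open_attr */

    /* Open an NX GZIP send window; returns the fd (close(fd) closes the
     * window) and stores the paste address in *paste_addr. */
    int open_gzip_window(void **paste_addr)
    {
            struct vas_tx_win_open_attr attr;
            int fd;

            fd = open("/dev/crypto/nx-gzip", O_RDWR);
            if (fd < 0)
                    return -1;

            memset(&attr, 0, sizeof(attr));
            attr.version = 1;       /* the driver only accepts version 1 */
            attr.vas_id = -1;       /* -1: default VAS instance */

            if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0) {
                    close(fd);
                    return -1;
            }

            /* Map the window's paste address; requests are submitted by
             * copy/paste of CRBs into this mapping. */
            *paste_addr = mmap(NULL, sysconf(_SC_PAGESIZE),
                               PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (*paste_addr == MAP_FAILED) {
                    close(fd);
                    return -1;
            }

            return fd;
    }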
Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Haren Myneni Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587114121.2275.1109.camel@hbabu-laptop --- arch/powerpc/include/asm/vas.h | 12 ++ arch/powerpc/platforms/powernv/Makefile | 2 +- arch/powerpc/platforms/powernv/vas-api.c | 278 ++++++++++++++++++++++++++++ arch/powerpc/platforms/powernv/vas-window.c | 6 +- arch/powerpc/platforms/powernv/vas.h | 2 + 5 files changed, 296 insertions(+), 4 deletions(-) create mode 100644 arch/powerpc/platforms/powernv/vas-api.c diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h index f93e6b0f5c84..6e427bc29b3a 100644 --- a/arch/powerpc/include/asm/vas.h +++ b/arch/powerpc/include/asm/vas.h @@ -163,4 +163,16 @@ int vas_copy_crb(void *crb, int offset); */ int vas_paste_crb(struct vas_window *win, int offset, bool re); +/* + * Register / unregister coprocessor type to VAS API which will be exported + * to user space. Applications can use this API to open / close window + * which can be used to send / receive requests directly to cooprcessor. + * + * Only NX GZIP coprocessor type is supported now, but this API can be + * used for others in future. + */ +int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type, + const char *name); +void vas_unregister_coproc_api(void); + #endif /* __ASM_POWERPC_VAS_H */ diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index 395789ffc482..fe3f0fb5aeca 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -17,7 +17,7 @@ obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o obj-$(CONFIG_OPAL_PRD) += opal-prd.o obj-$(CONFIG_PERF_EVENTS) += opal-imc.o obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o -obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o vas-fault.o +obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o vas-fault.o vas-api.o obj-$(CONFIG_OCXL_BASE) += ocxl.o obj-$(CONFIG_SCOM_DEBUGFS) += opal-xscom.o obj-$(CONFIG_PPC_SECURE_BOOT) += opal-secvar.o diff --git a/arch/powerpc/platforms/powernv/vas-api.c b/arch/powerpc/platforms/powernv/vas-api.c new file mode 100644 index 000000000000..98ed5d8c5441 --- /dev/null +++ b/arch/powerpc/platforms/powernv/vas-api.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * VAS user space API for its accelerators (Only NX-GZIP is supported now) + * Copyright (C) 2019 Haren Myneni, IBM Corp + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "vas.h" + +/* + * The driver creates the device node that can be used as follows: + * For NX-GZIP + * + * fd = open("/dev/crypto/nx-gzip", O_RDWR); + * rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr); + * paste_addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, fd, 0ULL). + * vas_copy(&crb, 0, 1); + * vas_paste(paste_addr, 0, 1); + * close(fd) or exit process to close window. + * + * where "vas_copy" and "vas_paste" are defined in copy-paste.h. + * copy/paste returns to the user space directly. So refer NX hardware + * documententation for exact copy/paste usage and completion / error + * conditions. + */ + +/* + * Wrapper object for the nx-gzip device - there is just one instance of + * this node for the whole system. 
+ */ +static struct coproc_dev { + struct cdev cdev; + struct device *device; + char *name; + dev_t devt; + struct class *class; + enum vas_cop_type cop_type; +} coproc_device; + +struct coproc_instance { + struct coproc_dev *coproc; + struct vas_window *txwin; +}; + +static char *coproc_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev)); +} + +static int coproc_open(struct inode *inode, struct file *fp) +{ + struct coproc_instance *cp_inst; + + cp_inst = kzalloc(sizeof(*cp_inst), GFP_KERNEL); + if (!cp_inst) + return -ENOMEM; + + cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev, + cdev); + fp->private_data = cp_inst; + + return 0; +} + +static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg) +{ + void __user *uptr = (void __user *)arg; + struct vas_tx_win_attr txattr = {}; + struct vas_tx_win_open_attr uattr; + struct coproc_instance *cp_inst; + struct vas_window *txwin; + int rc, vasid; + + cp_inst = fp->private_data; + + /* + * One window for file descriptor + */ + if (cp_inst->txwin) + return -EEXIST; + + rc = copy_from_user(&uattr, uptr, sizeof(uattr)); + if (rc) { + pr_err("%s(): copy_from_user() returns %d\n", __func__, rc); + return -EFAULT; + } + + if (uattr.version != 1) { + pr_err("Invalid version\n"); + return -EINVAL; + } + + vasid = uattr.vas_id; + + vas_init_tx_win_attr(&txattr, cp_inst->coproc->cop_type); + + txattr.lpid = mfspr(SPRN_LPID); + txattr.pidr = mfspr(SPRN_PID); + txattr.user_win = true; + txattr.rsvd_txbuf_count = false; + txattr.pswid = false; + + pr_devel("Pid %d: Opening txwin, PIDR %ld\n", txattr.pidr, + mfspr(SPRN_PID)); + + txwin = vas_tx_win_open(vasid, cp_inst->coproc->cop_type, &txattr); + if (IS_ERR(txwin)) { + pr_err("%s() vas_tx_win_open() failed, %ld\n", __func__, + PTR_ERR(txwin)); + return PTR_ERR(txwin); + } + + cp_inst->txwin = txwin; + + return 0; +} + +static int coproc_release(struct inode *inode, struct file *fp) +{ + struct coproc_instance *cp_inst = fp->private_data; + + if (cp_inst->txwin) { + vas_win_close(cp_inst->txwin); + cp_inst->txwin = NULL; + } + + kfree(cp_inst); + fp->private_data = NULL; + + /* + * We don't know here if user has other receive windows + * open, so we can't really call clear_thread_tidr(). + * So, once the process calls set_thread_tidr(), the + * TIDR value sticks around until process exits, resulting + * in an extra copy in restore_sprs(). 
+ */ + + return 0; +} + +static int coproc_mmap(struct file *fp, struct vm_area_struct *vma) +{ + struct coproc_instance *cp_inst = fp->private_data; + struct vas_window *txwin; + unsigned long pfn; + u64 paste_addr; + pgprot_t prot; + int rc; + + txwin = cp_inst->txwin; + + if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { + pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__, + (vma->vm_end - vma->vm_start), PAGE_SIZE); + return -EINVAL; + } + + /* Ensure instance has an open send window */ + if (!txwin) { + pr_err("%s(): No send window open?\n", __func__); + return -EINVAL; + } + + vas_win_paste_addr(txwin, &paste_addr, NULL); + pfn = paste_addr >> PAGE_SHIFT; + + /* flags, page_prot from cxl_mmap(), except we want cachable */ + vma->vm_flags |= VM_IO | VM_PFNMAP; + vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); + + prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY); + + rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, + vma->vm_end - vma->vm_start, prot); + + pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__, + paste_addr, vma->vm_start, rc); + + return rc; +} + +static long coproc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case VAS_TX_WIN_OPEN: + return coproc_ioc_tx_win_open(fp, arg); + default: + return -EINVAL; + } +} + +static struct file_operations coproc_fops = { + .open = coproc_open, + .release = coproc_release, + .mmap = coproc_mmap, + .unlocked_ioctl = coproc_ioctl, +}; + +/* + * Supporting only nx-gzip coprocessor type now, but this API code + * extended to other coprocessor types later. + */ +int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type, + const char *name) +{ + int rc = -EINVAL; + dev_t devno; + + rc = alloc_chrdev_region(&coproc_device.devt, 1, 1, name); + if (rc) { + pr_err("Unable to allocate coproc major number: %i\n", rc); + return rc; + } + + pr_devel("%s device allocated, dev [%i,%i]\n", name, + MAJOR(coproc_device.devt), MINOR(coproc_device.devt)); + + coproc_device.class = class_create(mod, name); + if (IS_ERR(coproc_device.class)) { + rc = PTR_ERR(coproc_device.class); + pr_err("Unable to create %s class %d\n", name, rc); + goto err_class; + } + coproc_device.class->devnode = coproc_devnode; + coproc_device.cop_type = cop_type; + + coproc_fops.owner = mod; + cdev_init(&coproc_device.cdev, &coproc_fops); + + devno = MKDEV(MAJOR(coproc_device.devt), 0); + rc = cdev_add(&coproc_device.cdev, devno, 1); + if (rc) { + pr_err("cdev_add() failed %d\n", rc); + goto err_cdev; + } + + coproc_device.device = device_create(coproc_device.class, NULL, + devno, NULL, name, MINOR(devno)); + if (IS_ERR(coproc_device.device)) { + rc = PTR_ERR(coproc_device.device); + pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc); + goto err; + } + + pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno), + MINOR(devno)); + + return 0; + +err: + cdev_del(&coproc_device.cdev); +err_cdev: + class_destroy(coproc_device.class); +err_class: + unregister_chrdev_region(coproc_device.devt, 1); + return rc; +} +EXPORT_SYMBOL_GPL(vas_register_coproc_api); + +void vas_unregister_coproc_api(void) +{ + dev_t devno; + + cdev_del(&coproc_device.cdev); + devno = MKDEV(MAJOR(coproc_device.devt), 0); + device_destroy(coproc_device.class, devno); + + class_destroy(coproc_device.class); + unregister_chrdev_region(coproc_device.devt, 1); +} +EXPORT_SYMBOL_GPL(vas_unregister_coproc_api); diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c 
index 52844a19ca65..6434f9cb5aed 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -26,7 +26,7 @@ * Compute the paste address region for the window @window using the * ->paste_base_addr and ->paste_win_id_shift we got from device tree. */ -static void compute_paste_address(struct vas_window *window, u64 *addr, int *len) +void vas_win_paste_addr(struct vas_window *window, u64 *addr, int *len) { int winid; u64 base, shift; @@ -80,7 +80,7 @@ static void *map_paste_region(struct vas_window *txwin) goto free_name; txwin->paste_addr_name = name; - compute_paste_address(txwin, &start, &len); + vas_win_paste_addr(txwin, &start, &len); if (!request_mem_region(start, len, name)) { pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n", @@ -138,7 +138,7 @@ static void unmap_paste_region(struct vas_window *window) u64 busaddr_start; if (window->paste_kaddr) { - compute_paste_address(window, &busaddr_start, &len); + vas_win_paste_addr(window, &busaddr_start, &len); unmap_region(window->paste_kaddr, busaddr_start, len); window->paste_kaddr = NULL; kfree(window->paste_addr_name); diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h index a7143b16232f..70f793e8f6cc 100644 --- a/arch/powerpc/platforms/powernv/vas.h +++ b/arch/powerpc/platforms/powernv/vas.h @@ -437,6 +437,8 @@ extern irqreturn_t vas_fault_handler(int irq, void *dev_id); extern void vas_return_credit(struct vas_window *window, bool tx); extern struct vas_window *vas_pswid_to_window(struct vas_instance *vinst, uint32_t pswid); +extern void vas_win_paste_addr(struct vas_window *window, u64 *addr, + int *len); static inline int vas_window_pid(struct vas_window *window) { -- cgit v1.2.3-59-g8ed1b From 7673d6568b141f34b4b59232f8de8d484408d2ee Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Fri, 17 Apr 2020 02:02:43 -0700 Subject: crypto/nx: Initialize coproc entry with kzalloc The coproc entry is zero-initialized during NX probe on Power9, but not on Power8. nx842_delete_coprocs() is used for both and frees the receive window if it is allocated. This crashes on rmmod on Power8, since coproc->vas.rxwin is never initialized there. This patch replaces kmalloc with kzalloc in nx842_powernv_probe(). Signed-off-by: Haren Myneni Acked-by: Herbert Xu Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587114163.2275.1110.camel@hbabu-laptop --- drivers/crypto/nx/nx-842-powernv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index c037a2403b82..8e63326f0866 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -922,7 +922,7 @@ static int __init nx842_powernv_probe(struct device_node *dn) return -EINVAL; } - coproc = kmalloc(sizeof(*coproc), GFP_KERNEL); + coproc = kzalloc(sizeof(*coproc), GFP_KERNEL); if (!coproc) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From 32e091a668bc8575c1ad6afd616726c60ba6e5d0 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Fri, 17 Apr 2020 02:03:20 -0700 Subject: crypto/nx: Rename nx-842-powernv file name to nx-common-powernv Rename nx-842-powernv.c to nx-common-powernv.c in preparation for adding setup and enable code for the new GZIP compression type. The actual functionality is not changed in this patch.
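Referring back to the kzalloc fix above: the crash pattern reduces to a shared teardown path testing a field that the Power8 probe path never writes. A condensed sketch, with field and function names taken from the driver but control flow simplified:

    /* Power8 probe path (nx842_powernv_probe): only ct/ci are written;
     * coproc->vas.rxwin is never touched, so kmalloc() left it as garbage. */
    coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);  /* was kmalloc() */
    if (!coproc)
            return -ENOMEM;
    coproc->ct = ct;
    coproc->ci = ci;

    /* Shared teardown (nx842_delete_coprocs), also run on rmmod on P8: */
    if (coproc->vas.rxwin)                  /* garbage here meant a bogus */
            vas_win_close(coproc->vas.rxwin);   /* vas_win_close() and a crash */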
Signed-off-by: Haren Myneni Acked-by: Herbert Xu Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587114200.2275.1113.camel@hbabu-laptop --- drivers/crypto/nx/Makefile | 2 +- drivers/crypto/nx/nx-842-powernv.c | 1062 --------------------------------- drivers/crypto/nx/nx-common-powernv.c | 1062 +++++++++++++++++++++++++++++++++ 3 files changed, 1063 insertions(+), 1063 deletions(-) delete mode 100644 drivers/crypto/nx/nx-842-powernv.c create mode 100644 drivers/crypto/nx/nx-common-powernv.c diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile index 015155da59c2..bc89a20e5d9d 100644 --- a/drivers/crypto/nx/Makefile +++ b/drivers/crypto/nx/Makefile @@ -15,4 +15,4 @@ obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compres obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o nx-compress-objs := nx-842.o nx-compress-pseries-objs := nx-842-pseries.o -nx-compress-powernv-objs := nx-842-powernv.o +nx-compress-powernv-objs := nx-common-powernv.o diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c deleted file mode 100644 index 8e63326f0866..000000000000 --- a/drivers/crypto/nx/nx-842-powernv.c +++ /dev/null @@ -1,1062 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Driver for IBM PowerNV 842 compression accelerator - * - * Copyright (C) 2015 Dan Streetman, IBM Corp - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include "nx-842.h" - -#include - -#include -#include -#include -#include -#include -#include - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Dan Streetman "); -MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors"); -MODULE_ALIAS_CRYPTO("842"); -MODULE_ALIAS_CRYPTO("842-nx"); - -#define WORKMEM_ALIGN (CRB_ALIGN) -#define CSB_WAIT_MAX (5000) /* ms */ -#define VAS_RETRIES (10) - -struct nx842_workmem { - /* Below fields must be properly aligned */ - struct coprocessor_request_block crb; /* CRB_ALIGN align */ - struct data_descriptor_entry ddl_in[DDL_LEN_MAX]; /* DDE_ALIGN align */ - struct data_descriptor_entry ddl_out[DDL_LEN_MAX]; /* DDE_ALIGN align */ - /* Above fields must be properly aligned */ - - ktime_t start; - - char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */ -} __packed __aligned(WORKMEM_ALIGN); - -struct nx842_coproc { - unsigned int chip_id; - unsigned int ct; - unsigned int ci; /* Coprocessor instance, used with icswx */ - struct { - struct vas_window *rxwin; - int id; - } vas; - struct list_head list; -}; - -/* - * Send the request to NX engine on the chip for the corresponding CPU - * where the process is executing. Use with VAS function. - */ -static DEFINE_PER_CPU(struct vas_window *, cpu_txwin); - -/* no cpu hotplug on powernv, so this list never changes after init */ -static LIST_HEAD(nx842_coprocs); -static unsigned int nx842_ct; /* used in icswx function */ - -static int (*nx842_powernv_exec)(const unsigned char *in, - unsigned int inlen, unsigned char *out, - unsigned int *outlenp, void *workmem, int fc); - -/** - * setup_indirect_dde - Setup an indirect DDE - * - * The DDE is setup with the the DDE count, byte count, and address of - * first direct DDE in the list. 
- */ -static void setup_indirect_dde(struct data_descriptor_entry *dde, - struct data_descriptor_entry *ddl, - unsigned int dde_count, unsigned int byte_count) -{ - dde->flags = 0; - dde->count = dde_count; - dde->index = 0; - dde->length = cpu_to_be32(byte_count); - dde->address = cpu_to_be64(nx842_get_pa(ddl)); -} - -/** - * setup_direct_dde - Setup single DDE from buffer - * - * The DDE is setup with the buffer and length. The buffer must be properly - * aligned. The used length is returned. - * Returns: - * N Successfully set up DDE with N bytes - */ -static unsigned int setup_direct_dde(struct data_descriptor_entry *dde, - unsigned long pa, unsigned int len) -{ - unsigned int l = min_t(unsigned int, len, LEN_ON_PAGE(pa)); - - dde->flags = 0; - dde->count = 0; - dde->index = 0; - dde->length = cpu_to_be32(l); - dde->address = cpu_to_be64(pa); - - return l; -} - -/** - * setup_ddl - Setup DDL from buffer - * - * Returns: - * 0 Successfully set up DDL - */ -static int setup_ddl(struct data_descriptor_entry *dde, - struct data_descriptor_entry *ddl, - unsigned char *buf, unsigned int len, - bool in) -{ - unsigned long pa = nx842_get_pa(buf); - int i, ret, total_len = len; - - if (!IS_ALIGNED(pa, DDE_BUFFER_ALIGN)) { - pr_debug("%s buffer pa 0x%lx not 0x%x-byte aligned\n", - in ? "input" : "output", pa, DDE_BUFFER_ALIGN); - return -EINVAL; - } - - /* only need to check last mult; since buffer must be - * DDE_BUFFER_ALIGN aligned, and that is a multiple of - * DDE_BUFFER_SIZE_MULT, and pre-last page DDE buffers - * are guaranteed a multiple of DDE_BUFFER_SIZE_MULT. - */ - if (len % DDE_BUFFER_LAST_MULT) { - pr_debug("%s buffer len 0x%x not a multiple of 0x%x\n", - in ? "input" : "output", len, DDE_BUFFER_LAST_MULT); - if (in) - return -EINVAL; - len = round_down(len, DDE_BUFFER_LAST_MULT); - } - - /* use a single direct DDE */ - if (len <= LEN_ON_PAGE(pa)) { - ret = setup_direct_dde(dde, pa, len); - WARN_ON(ret < len); - return 0; - } - - /* use the DDL */ - for (i = 0; i < DDL_LEN_MAX && len > 0; i++) { - ret = setup_direct_dde(&ddl[i], pa, len); - buf += ret; - len -= ret; - pa = nx842_get_pa(buf); - } - - if (len > 0) { - pr_debug("0x%x total %s bytes 0x%x too many for DDL.\n", - total_len, in ? "input" : "output", len); - if (in) - return -EMSGSIZE; - total_len -= len; - } - setup_indirect_dde(dde, ddl, i, total_len); - - return 0; -} - -#define CSB_ERR(csb, msg, ...) \ - pr_err("ERROR: " msg " : %02x %02x %02x %02x %08x\n", \ - ##__VA_ARGS__, (csb)->flags, \ - (csb)->cs, (csb)->cc, (csb)->ce, \ - be32_to_cpu((csb)->count)) - -#define CSB_ERR_ADDR(csb, msg, ...) 
\ - CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__, \ - (unsigned long)be64_to_cpu((csb)->address)) - -/** - * wait_for_csb - */ -static int wait_for_csb(struct nx842_workmem *wmem, - struct coprocessor_status_block *csb) -{ - ktime_t start = wmem->start, now = ktime_get(); - ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); - - while (!(READ_ONCE(csb->flags) & CSB_V)) { - cpu_relax(); - now = ktime_get(); - if (ktime_after(now, timeout)) - break; - } - - /* hw has updated csb and output buffer */ - barrier(); - - /* check CSB flags */ - if (!(csb->flags & CSB_V)) { - CSB_ERR(csb, "CSB still not valid after %ld us, giving up", - (long)ktime_us_delta(now, start)); - return -ETIMEDOUT; - } - if (csb->flags & CSB_F) { - CSB_ERR(csb, "Invalid CSB format"); - return -EPROTO; - } - if (csb->flags & CSB_CH) { - CSB_ERR(csb, "Invalid CSB chaining state"); - return -EPROTO; - } - - /* verify CSB completion sequence is 0 */ - if (csb->cs) { - CSB_ERR(csb, "Invalid CSB completion sequence"); - return -EPROTO; - } - - /* check CSB Completion Code */ - switch (csb->cc) { - /* no error */ - case CSB_CC_SUCCESS: - break; - case CSB_CC_TPBC_GT_SPBC: - /* not an error, but the compressed data is - * larger than the uncompressed data :( - */ - break; - - /* input data errors */ - case CSB_CC_OPERAND_OVERLAP: - /* input and output buffers overlap */ - CSB_ERR(csb, "Operand Overlap error"); - return -EINVAL; - case CSB_CC_INVALID_OPERAND: - CSB_ERR(csb, "Invalid operand"); - return -EINVAL; - case CSB_CC_NOSPC: - /* output buffer too small */ - return -ENOSPC; - case CSB_CC_ABORT: - CSB_ERR(csb, "Function aborted"); - return -EINTR; - case CSB_CC_CRC_MISMATCH: - CSB_ERR(csb, "CRC mismatch"); - return -EINVAL; - case CSB_CC_TEMPL_INVALID: - CSB_ERR(csb, "Compressed data template invalid"); - return -EINVAL; - case CSB_CC_TEMPL_OVERFLOW: - CSB_ERR(csb, "Compressed data template shows data past end"); - return -EINVAL; - case CSB_CC_EXCEED_BYTE_COUNT: /* P9 or later */ - /* - * DDE byte count exceeds the limit specified in Maximum - * byte count register. 
- */ - CSB_ERR(csb, "DDE byte count exceeds the limit"); - return -EINVAL; - - /* these should not happen */ - case CSB_CC_INVALID_ALIGN: - /* setup_ddl should have detected this */ - CSB_ERR_ADDR(csb, "Invalid alignment"); - return -EINVAL; - case CSB_CC_DATA_LENGTH: - /* setup_ddl should have detected this */ - CSB_ERR(csb, "Invalid data length"); - return -EINVAL; - case CSB_CC_WR_TRANSLATION: - case CSB_CC_TRANSLATION: - case CSB_CC_TRANSLATION_DUP1: - case CSB_CC_TRANSLATION_DUP2: - case CSB_CC_TRANSLATION_DUP3: - case CSB_CC_TRANSLATION_DUP4: - case CSB_CC_TRANSLATION_DUP5: - case CSB_CC_TRANSLATION_DUP6: - /* should not happen, we use physical addrs */ - CSB_ERR_ADDR(csb, "Translation error"); - return -EPROTO; - case CSB_CC_WR_PROTECTION: - case CSB_CC_PROTECTION: - case CSB_CC_PROTECTION_DUP1: - case CSB_CC_PROTECTION_DUP2: - case CSB_CC_PROTECTION_DUP3: - case CSB_CC_PROTECTION_DUP4: - case CSB_CC_PROTECTION_DUP5: - case CSB_CC_PROTECTION_DUP6: - /* should not happen, we use physical addrs */ - CSB_ERR_ADDR(csb, "Protection error"); - return -EPROTO; - case CSB_CC_PRIVILEGE: - /* shouldn't happen, we're in HYP mode */ - CSB_ERR(csb, "Insufficient Privilege error"); - return -EPROTO; - case CSB_CC_EXCESSIVE_DDE: - /* shouldn't happen, setup_ddl doesn't use many dde's */ - CSB_ERR(csb, "Too many DDEs in DDL"); - return -EINVAL; - case CSB_CC_TRANSPORT: - case CSB_CC_INVALID_CRB: /* P9 or later */ - /* shouldn't happen, we setup CRB correctly */ - CSB_ERR(csb, "Invalid CRB"); - return -EINVAL; - case CSB_CC_INVALID_DDE: /* P9 or later */ - /* - * shouldn't happen, setup_direct/indirect_dde creates - * DDE right - */ - CSB_ERR(csb, "Invalid DDE"); - return -EINVAL; - case CSB_CC_SEGMENTED_DDL: - /* shouldn't happen, setup_ddl creates DDL right */ - CSB_ERR(csb, "Segmented DDL error"); - return -EINVAL; - case CSB_CC_DDE_OVERFLOW: - /* shouldn't happen, setup_ddl creates DDL right */ - CSB_ERR(csb, "DDE overflow error"); - return -EINVAL; - case CSB_CC_SESSION: - /* should not happen with ICSWX */ - CSB_ERR(csb, "Session violation error"); - return -EPROTO; - case CSB_CC_CHAIN: - /* should not happen, we don't use chained CRBs */ - CSB_ERR(csb, "Chained CRB error"); - return -EPROTO; - case CSB_CC_SEQUENCE: - /* should not happen, we don't use chained CRBs */ - CSB_ERR(csb, "CRB sequence number error"); - return -EPROTO; - case CSB_CC_UNKNOWN_CODE: - CSB_ERR(csb, "Unknown subfunction code"); - return -EPROTO; - - /* hardware errors */ - case CSB_CC_RD_EXTERNAL: - case CSB_CC_RD_EXTERNAL_DUP1: - case CSB_CC_RD_EXTERNAL_DUP2: - case CSB_CC_RD_EXTERNAL_DUP3: - CSB_ERR_ADDR(csb, "Read error outside coprocessor"); - return -EPROTO; - case CSB_CC_WR_EXTERNAL: - CSB_ERR_ADDR(csb, "Write error outside coprocessor"); - return -EPROTO; - case CSB_CC_INTERNAL: - CSB_ERR(csb, "Internal error in coprocessor"); - return -EPROTO; - case CSB_CC_PROVISION: - CSB_ERR(csb, "Storage provision error"); - return -EPROTO; - case CSB_CC_HW: - CSB_ERR(csb, "Correctable hardware error"); - return -EPROTO; - case CSB_CC_HW_EXPIRED_TIMER: /* P9 or later */ - CSB_ERR(csb, "Job did not finish within allowed time"); - return -EPROTO; - - default: - CSB_ERR(csb, "Invalid CC %d", csb->cc); - return -EPROTO; - } - - /* check Completion Extension state */ - if (csb->ce & CSB_CE_TERMINATION) { - CSB_ERR(csb, "CSB request was terminated"); - return -EPROTO; - } - if (csb->ce & CSB_CE_INCOMPLETE) { - CSB_ERR(csb, "CSB request not complete"); - return -EPROTO; - } - if (!(csb->ce & CSB_CE_TPBC)) { - CSB_ERR(csb, "TPBC not 
provided, unknown target length"); - return -EPROTO; - } - - /* successful completion */ - pr_debug_ratelimited("Processed %u bytes in %lu us\n", - be32_to_cpu(csb->count), - (unsigned long)ktime_us_delta(now, start)); - - return 0; -} - -static int nx842_config_crb(const unsigned char *in, unsigned int inlen, - unsigned char *out, unsigned int outlen, - struct nx842_workmem *wmem) -{ - struct coprocessor_request_block *crb; - struct coprocessor_status_block *csb; - u64 csb_addr; - int ret; - - crb = &wmem->crb; - csb = &crb->csb; - - /* Clear any previous values */ - memset(crb, 0, sizeof(*crb)); - - /* set up DDLs */ - ret = setup_ddl(&crb->source, wmem->ddl_in, - (unsigned char *)in, inlen, true); - if (ret) - return ret; - - ret = setup_ddl(&crb->target, wmem->ddl_out, - out, outlen, false); - if (ret) - return ret; - - /* set up CRB's CSB addr */ - csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS; - csb_addr |= CRB_CSB_AT; /* Addrs are phys */ - crb->csb_addr = cpu_to_be64(csb_addr); - - return 0; -} - -/** - * nx842_exec_icswx - compress/decompress data using the 842 algorithm - * - * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems. - * This compresses or decompresses the provided input buffer into the provided - * output buffer. - * - * Upon return from this function @outlen contains the length of the - * output data. If there is an error then @outlen will be 0 and an - * error will be specified by the return code from this function. - * - * The @workmem buffer should only be used by one function call at a time. - * - * @in: input buffer pointer - * @inlen: input buffer size - * @out: output buffer pointer - * @outlenp: output buffer size pointer - * @workmem: working memory buffer pointer, size determined by - * nx842_powernv_driver.workmem_size - * @fc: function code, see CCW Function Codes in nx-842.h - * - * Returns: - * 0 Success, output of length @outlenp stored in the buffer at @out - * -ENODEV Hardware unavailable - * -ENOSPC Output buffer is to small - * -EMSGSIZE Input buffer too large - * -EINVAL buffer constraints do not fix nx842_constraints - * -EPROTO hardware error during operation - * -ETIMEDOUT hardware did not complete operation in reasonable time - * -EINTR operation was aborted - */ -static int nx842_exec_icswx(const unsigned char *in, unsigned int inlen, - unsigned char *out, unsigned int *outlenp, - void *workmem, int fc) -{ - struct coprocessor_request_block *crb; - struct coprocessor_status_block *csb; - struct nx842_workmem *wmem; - int ret; - u32 ccw; - unsigned int outlen = *outlenp; - - wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN); - - *outlenp = 0; - - /* shoudn't happen, we don't load without a coproc */ - if (!nx842_ct) { - pr_err_ratelimited("coprocessor CT is 0"); - return -ENODEV; - } - - ret = nx842_config_crb(in, inlen, out, outlen, wmem); - if (ret) - return ret; - - crb = &wmem->crb; - csb = &crb->csb; - - /* set up CCW */ - ccw = 0; - ccw = SET_FIELD(CCW_CT, ccw, nx842_ct); - ccw = SET_FIELD(CCW_CI_842, ccw, 0); /* use 0 for hw auto-selection */ - ccw = SET_FIELD(CCW_FC_842, ccw, fc); - - wmem->start = ktime_get(); - - /* do ICSWX */ - ret = icswx(cpu_to_be32(ccw), crb); - - pr_debug_ratelimited("icswx CR %x ccw %x crb->ccw %x\n", ret, - (unsigned int)ccw, - (unsigned int)be32_to_cpu(crb->ccw)); - - /* - * NX842 coprocessor sets 3rd bit in CR register with XER[S0]. - * XER[S0] is the integer summary overflow bit which is nothing - * to do NX. Since this bit can be set with other return values, - * mask this bit. 
- */ - ret &= ~ICSWX_XERS0; - - switch (ret) { - case ICSWX_INITIATED: - ret = wait_for_csb(wmem, csb); - break; - case ICSWX_BUSY: - pr_debug_ratelimited("842 Coprocessor busy\n"); - ret = -EBUSY; - break; - case ICSWX_REJECTED: - pr_err_ratelimited("ICSWX rejected\n"); - ret = -EPROTO; - break; - } - - if (!ret) - *outlenp = be32_to_cpu(csb->count); - - return ret; -} - -/** - * nx842_exec_vas - compress/decompress data using the 842 algorithm - * - * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems. - * This compresses or decompresses the provided input buffer into the provided - * output buffer. - * - * Upon return from this function @outlen contains the length of the - * output data. If there is an error then @outlen will be 0 and an - * error will be specified by the return code from this function. - * - * The @workmem buffer should only be used by one function call at a time. - * - * @in: input buffer pointer - * @inlen: input buffer size - * @out: output buffer pointer - * @outlenp: output buffer size pointer - * @workmem: working memory buffer pointer, size determined by - * nx842_powernv_driver.workmem_size - * @fc: function code, see CCW Function Codes in nx-842.h - * - * Returns: - * 0 Success, output of length @outlenp stored in the buffer - * at @out - * -ENODEV Hardware unavailable - * -ENOSPC Output buffer is to small - * -EMSGSIZE Input buffer too large - * -EINVAL buffer constraints do not fix nx842_constraints - * -EPROTO hardware error during operation - * -ETIMEDOUT hardware did not complete operation in reasonable time - * -EINTR operation was aborted - */ -static int nx842_exec_vas(const unsigned char *in, unsigned int inlen, - unsigned char *out, unsigned int *outlenp, - void *workmem, int fc) -{ - struct coprocessor_request_block *crb; - struct coprocessor_status_block *csb; - struct nx842_workmem *wmem; - struct vas_window *txwin; - int ret, i = 0; - u32 ccw; - unsigned int outlen = *outlenp; - - wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN); - - *outlenp = 0; - - crb = &wmem->crb; - csb = &crb->csb; - - ret = nx842_config_crb(in, inlen, out, outlen, wmem); - if (ret) - return ret; - - ccw = 0; - ccw = SET_FIELD(CCW_FC_842, ccw, fc); - crb->ccw = cpu_to_be32(ccw); - - do { - wmem->start = ktime_get(); - preempt_disable(); - txwin = this_cpu_read(cpu_txwin); - - /* - * VAS copy CRB into L2 cache. Refer . - * @crb and @offset. - */ - vas_copy_crb(crb, 0); - - /* - * VAS paste previously copied CRB to NX. - * @txwin, @offset and @last (must be true). - */ - ret = vas_paste_crb(txwin, 0, 1); - preempt_enable(); - /* - * Retry copy/paste function for VAS failures. - */ - } while (ret && (i++ < VAS_RETRIES)); - - if (ret) { - pr_err_ratelimited("VAS copy/paste failed\n"); - return ret; - } - - ret = wait_for_csb(wmem, csb); - if (!ret) - *outlenp = be32_to_cpu(csb->count); - - return ret; -} - -/** - * nx842_powernv_compress - Compress data using the 842 algorithm - * - * Compression provided by the NX842 coprocessor on IBM PowerNV systems. - * The input buffer is compressed and the result is stored in the - * provided output buffer. - * - * Upon return from this function @outlen contains the length of the - * compressed data. If there is an error then @outlen will be 0 and an - * error will be specified by the return code from this function. 
- * - * @in: input buffer pointer - * @inlen: input buffer size - * @out: output buffer pointer - * @outlenp: output buffer size pointer - * @workmem: working memory buffer pointer, size determined by - * nx842_powernv_driver.workmem_size - * - * Returns: see @nx842_powernv_exec() - */ -static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen, - unsigned char *out, unsigned int *outlenp, - void *wmem) -{ - return nx842_powernv_exec(in, inlen, out, outlenp, - wmem, CCW_FC_842_COMP_CRC); -} - -/** - * nx842_powernv_decompress - Decompress data using the 842 algorithm - * - * Decompression provided by the NX842 coprocessor on IBM PowerNV systems. - * The input buffer is decompressed and the result is stored in the - * provided output buffer. - * - * Upon return from this function @outlen contains the length of the - * decompressed data. If there is an error then @outlen will be 0 and an - * error will be specified by the return code from this function. - * - * @in: input buffer pointer - * @inlen: input buffer size - * @out: output buffer pointer - * @outlenp: output buffer size pointer - * @workmem: working memory buffer pointer, size determined by - * nx842_powernv_driver.workmem_size - * - * Returns: see @nx842_powernv_exec() - */ -static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen, - unsigned char *out, unsigned int *outlenp, - void *wmem) -{ - return nx842_powernv_exec(in, inlen, out, outlenp, - wmem, CCW_FC_842_DECOMP_CRC); -} - -static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc, - int chipid) -{ - coproc->chip_id = chipid; - INIT_LIST_HEAD(&coproc->list); - list_add(&coproc->list, &nx842_coprocs); -} - -static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc) -{ - struct vas_window *txwin = NULL; - struct vas_tx_win_attr txattr; - - /* - * Kernel requests will be high priority. So open send - * windows only for high priority RxFIFO entries. - */ - vas_init_tx_win_attr(&txattr, coproc->ct); - txattr.lpid = 0; /* lpid is 0 for kernel requests */ - txattr.pid = 0; /* pid is 0 for kernel requests */ - - /* - * Open a VAS send window which is used to send request to NX. - */ - txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr); - if (IS_ERR(txwin)) - pr_err("ibm,nx-842: Can not open TX window: %ld\n", - PTR_ERR(txwin)); - - return txwin; -} - -/* - * Identify chip ID for each CPU, open send wndow for the corresponding NX - * engine and save txwin in percpu cpu_txwin. - * cpu_txwin is used in copy/paste operation for each compression / - * decompression request. - */ -static int nx842_open_percpu_txwins(void) -{ - struct nx842_coproc *coproc, *n; - unsigned int i, chip_id; - - for_each_possible_cpu(i) { - struct vas_window *txwin = NULL; - - chip_id = cpu_to_chip_id(i); - - list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { - /* - * Kernel requests use only high priority FIFOs. So - * open send windows for these FIFOs. 
- */ - - if (coproc->ct != VAS_COP_TYPE_842_HIPRI) - continue; - - if (coproc->chip_id == chip_id) { - txwin = nx842_alloc_txwin(coproc); - if (IS_ERR(txwin)) - return PTR_ERR(txwin); - - per_cpu(cpu_txwin, i) = txwin; - break; - } - } - - if (!per_cpu(cpu_txwin, i)) { - /* shouldn't happen, Each chip will have NX engine */ - pr_err("NX engine is not available for CPU %d\n", i); - return -EINVAL; - } - } - - return 0; -} - -static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, - int vasid, int *ct) -{ - struct vas_window *rxwin = NULL; - struct vas_rx_win_attr rxattr; - struct nx842_coproc *coproc; - u32 lpid, pid, tid, fifo_size; - u64 rx_fifo; - const char *priority; - int ret; - - ret = of_property_read_u64(dn, "rx-fifo-address", &rx_fifo); - if (ret) { - pr_err("Missing rx-fifo-address property\n"); - return ret; - } - - ret = of_property_read_u32(dn, "rx-fifo-size", &fifo_size); - if (ret) { - pr_err("Missing rx-fifo-size property\n"); - return ret; - } - - ret = of_property_read_u32(dn, "lpid", &lpid); - if (ret) { - pr_err("Missing lpid property\n"); - return ret; - } - - ret = of_property_read_u32(dn, "pid", &pid); - if (ret) { - pr_err("Missing pid property\n"); - return ret; - } - - ret = of_property_read_u32(dn, "tid", &tid); - if (ret) { - pr_err("Missing tid property\n"); - return ret; - } - - ret = of_property_read_string(dn, "priority", &priority); - if (ret) { - pr_err("Missing priority property\n"); - return ret; - } - - coproc = kzalloc(sizeof(*coproc), GFP_KERNEL); - if (!coproc) - return -ENOMEM; - - if (!strcmp(priority, "High")) - coproc->ct = VAS_COP_TYPE_842_HIPRI; - else if (!strcmp(priority, "Normal")) - coproc->ct = VAS_COP_TYPE_842; - else { - pr_err("Invalid RxFIFO priority value\n"); - ret = -EINVAL; - goto err_out; - } - - vas_init_rx_win_attr(&rxattr, coproc->ct); - rxattr.rx_fifo = (void *)rx_fifo; - rxattr.rx_fifo_size = fifo_size; - rxattr.lnotify_lpid = lpid; - rxattr.lnotify_pid = pid; - rxattr.lnotify_tid = tid; - /* - * Maximum RX window credits can not be more than #CRBs in - * RxFIFO. Otherwise, can get checkstop if RxFIFO overruns. - */ - rxattr.wcreds_max = fifo_size / CRB_SIZE; - - /* - * Open a VAS receice window which is used to configure RxFIFO - * for NX. - */ - rxwin = vas_rx_win_open(vasid, coproc->ct, &rxattr); - if (IS_ERR(rxwin)) { - ret = PTR_ERR(rxwin); - pr_err("setting RxFIFO with VAS failed: %d\n", - ret); - goto err_out; - } - - coproc->vas.rxwin = rxwin; - coproc->vas.id = vasid; - nx842_add_coprocs_list(coproc, chip_id); - - /* - * (lpid, pid, tid) combination has to be unique for each - * coprocessor instance in the system. So to make it - * unique, skiboot uses coprocessor type such as 842 or - * GZIP for pid and provides this value to kernel in pid - * device-tree property. 
- */ - *ct = pid; - - return 0; - -err_out: - kfree(coproc); - return ret; -} - - -static int __init nx842_powernv_probe_vas(struct device_node *pn) -{ - struct device_node *dn; - int chip_id, vasid, ret = 0; - int nx_fifo_found = 0; - int uninitialized_var(ct); - - chip_id = of_get_ibm_chip_id(pn); - if (chip_id < 0) { - pr_err("ibm,chip-id missing\n"); - return -EINVAL; - } - - vasid = chip_to_vas_id(chip_id); - if (vasid < 0) { - pr_err("Unable to map chip_id %d to vasid\n", chip_id); - return -EINVAL; - } - - for_each_child_of_node(pn, dn) { - if (of_device_is_compatible(dn, "ibm,p9-nx-842")) { - ret = vas_cfg_coproc_info(dn, chip_id, vasid, &ct); - if (ret) { - of_node_put(dn); - return ret; - } - nx_fifo_found++; - } - } - - if (!nx_fifo_found) { - pr_err("NX842 FIFO nodes are missing\n"); - return -EINVAL; - } - - /* - * Initialize NX instance for both high and normal priority FIFOs. - */ - if (opal_check_token(OPAL_NX_COPROC_INIT)) { - ret = opal_nx_coproc_init(chip_id, ct); - if (ret) { - pr_err("Failed to initialize NX for chip(%d): %d\n", - chip_id, ret); - ret = opal_error_code(ret); - } - } else - pr_warn("Firmware doesn't support NX initialization\n"); - - return ret; -} - -static int __init nx842_powernv_probe(struct device_node *dn) -{ - struct nx842_coproc *coproc; - unsigned int ct, ci; - int chip_id; - - chip_id = of_get_ibm_chip_id(dn); - if (chip_id < 0) { - pr_err("ibm,chip-id missing\n"); - return -EINVAL; - } - - if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) { - pr_err("ibm,842-coprocessor-type missing\n"); - return -EINVAL; - } - - if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) { - pr_err("ibm,842-coprocessor-instance missing\n"); - return -EINVAL; - } - - coproc = kzalloc(sizeof(*coproc), GFP_KERNEL); - if (!coproc) - return -ENOMEM; - - coproc->ct = ct; - coproc->ci = ci; - nx842_add_coprocs_list(coproc, chip_id); - - pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci); - - if (!nx842_ct) - nx842_ct = ct; - else if (nx842_ct != ct) - pr_err("NX842 chip %d, CT %d != first found CT %d\n", - chip_id, ct, nx842_ct); - - return 0; -} - -static void nx842_delete_coprocs(void) -{ - struct nx842_coproc *coproc, *n; - struct vas_window *txwin; - int i; - - /* - * close percpu txwins that are opened for the corresponding coproc. 
- */ - for_each_possible_cpu(i) { - txwin = per_cpu(cpu_txwin, i); - if (txwin) - vas_win_close(txwin); - - per_cpu(cpu_txwin, i) = 0; - } - - list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { - if (coproc->vas.rxwin) - vas_win_close(coproc->vas.rxwin); - - list_del(&coproc->list); - kfree(coproc); - } -} - -static struct nx842_constraints nx842_powernv_constraints = { - .alignment = DDE_BUFFER_ALIGN, - .multiple = DDE_BUFFER_LAST_MULT, - .minimum = DDE_BUFFER_LAST_MULT, - .maximum = (DDL_LEN_MAX - 1) * PAGE_SIZE, -}; - -static struct nx842_driver nx842_powernv_driver = { - .name = KBUILD_MODNAME, - .owner = THIS_MODULE, - .workmem_size = sizeof(struct nx842_workmem), - .constraints = &nx842_powernv_constraints, - .compress = nx842_powernv_compress, - .decompress = nx842_powernv_decompress, -}; - -static int nx842_powernv_crypto_init(struct crypto_tfm *tfm) -{ - return nx842_crypto_init(tfm, &nx842_powernv_driver); -} - -static struct crypto_alg nx842_powernv_alg = { - .cra_name = "842", - .cra_driver_name = "842-nx", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct nx842_crypto_ctx), - .cra_module = THIS_MODULE, - .cra_init = nx842_powernv_crypto_init, - .cra_exit = nx842_crypto_exit, - .cra_u = { .compress = { - .coa_compress = nx842_crypto_compress, - .coa_decompress = nx842_crypto_decompress } } -}; - -static __init int nx842_powernv_init(void) -{ - struct device_node *dn; - int ret; - - /* verify workmem size/align restrictions */ - BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN); - BUILD_BUG_ON(CRB_ALIGN % DDE_ALIGN); - BUILD_BUG_ON(CRB_SIZE % DDE_ALIGN); - /* verify buffer size/align restrictions */ - BUILD_BUG_ON(PAGE_SIZE % DDE_BUFFER_ALIGN); - BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT); - BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT); - - for_each_compatible_node(dn, NULL, "ibm,power9-nx") { - ret = nx842_powernv_probe_vas(dn); - if (ret) { - nx842_delete_coprocs(); - of_node_put(dn); - return ret; - } - } - - if (list_empty(&nx842_coprocs)) { - for_each_compatible_node(dn, NULL, "ibm,power-nx") - nx842_powernv_probe(dn); - - if (!nx842_ct) - return -ENODEV; - - nx842_powernv_exec = nx842_exec_icswx; - } else { - ret = nx842_open_percpu_txwins(); - if (ret) { - nx842_delete_coprocs(); - return ret; - } - - nx842_powernv_exec = nx842_exec_vas; - } - - ret = crypto_register_alg(&nx842_powernv_alg); - if (ret) { - nx842_delete_coprocs(); - return ret; - } - - return 0; -} -module_init(nx842_powernv_init); - -static void __exit nx842_powernv_exit(void) -{ - crypto_unregister_alg(&nx842_powernv_alg); - - nx842_delete_coprocs(); -} -module_exit(nx842_powernv_exit); diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c new file mode 100644 index 000000000000..f42881fbed2f --- /dev/null +++ b/drivers/crypto/nx/nx-common-powernv.c @@ -0,0 +1,1062 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for IBM PowerNV compression accelerator + * + * Copyright (C) 2015 Dan Streetman, IBM Corp + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "nx-842.h" + +#include + +#include +#include +#include +#include +#include +#include + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dan Streetman "); +MODULE_DESCRIPTION("H/W Compression driver for IBM PowerNV processors"); +MODULE_ALIAS_CRYPTO("842"); +MODULE_ALIAS_CRYPTO("842-nx"); + +#define WORKMEM_ALIGN (CRB_ALIGN) +#define CSB_WAIT_MAX (5000) /* ms */ +#define VAS_RETRIES (10) + +struct nx842_workmem { + /* Below fields must 
be properly aligned */ + struct coprocessor_request_block crb; /* CRB_ALIGN align */ + struct data_descriptor_entry ddl_in[DDL_LEN_MAX]; /* DDE_ALIGN align */ + struct data_descriptor_entry ddl_out[DDL_LEN_MAX]; /* DDE_ALIGN align */ + /* Above fields must be properly aligned */ + + ktime_t start; + + char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */ +} __packed __aligned(WORKMEM_ALIGN); + +struct nx842_coproc { + unsigned int chip_id; + unsigned int ct; + unsigned int ci; /* Coprocessor instance, used with icswx */ + struct { + struct vas_window *rxwin; + int id; + } vas; + struct list_head list; +}; + +/* + * Send the request to NX engine on the chip for the corresponding CPU + * where the process is executing. Use with VAS function. + */ +static DEFINE_PER_CPU(struct vas_window *, cpu_txwin); + +/* no cpu hotplug on powernv, so this list never changes after init */ +static LIST_HEAD(nx842_coprocs); +static unsigned int nx842_ct; /* used in icswx function */ + +static int (*nx842_powernv_exec)(const unsigned char *in, + unsigned int inlen, unsigned char *out, + unsigned int *outlenp, void *workmem, int fc); + +/** + * setup_indirect_dde - Setup an indirect DDE + * + * The DDE is setup with the the DDE count, byte count, and address of + * first direct DDE in the list. + */ +static void setup_indirect_dde(struct data_descriptor_entry *dde, + struct data_descriptor_entry *ddl, + unsigned int dde_count, unsigned int byte_count) +{ + dde->flags = 0; + dde->count = dde_count; + dde->index = 0; + dde->length = cpu_to_be32(byte_count); + dde->address = cpu_to_be64(nx842_get_pa(ddl)); +} + +/** + * setup_direct_dde - Setup single DDE from buffer + * + * The DDE is setup with the buffer and length. The buffer must be properly + * aligned. The used length is returned. + * Returns: + * N Successfully set up DDE with N bytes + */ +static unsigned int setup_direct_dde(struct data_descriptor_entry *dde, + unsigned long pa, unsigned int len) +{ + unsigned int l = min_t(unsigned int, len, LEN_ON_PAGE(pa)); + + dde->flags = 0; + dde->count = 0; + dde->index = 0; + dde->length = cpu_to_be32(l); + dde->address = cpu_to_be64(pa); + + return l; +} + +/** + * setup_ddl - Setup DDL from buffer + * + * Returns: + * 0 Successfully set up DDL + */ +static int setup_ddl(struct data_descriptor_entry *dde, + struct data_descriptor_entry *ddl, + unsigned char *buf, unsigned int len, + bool in) +{ + unsigned long pa = nx842_get_pa(buf); + int i, ret, total_len = len; + + if (!IS_ALIGNED(pa, DDE_BUFFER_ALIGN)) { + pr_debug("%s buffer pa 0x%lx not 0x%x-byte aligned\n", + in ? "input" : "output", pa, DDE_BUFFER_ALIGN); + return -EINVAL; + } + + /* only need to check last mult; since buffer must be + * DDE_BUFFER_ALIGN aligned, and that is a multiple of + * DDE_BUFFER_SIZE_MULT, and pre-last page DDE buffers + * are guaranteed a multiple of DDE_BUFFER_SIZE_MULT. + */ + if (len % DDE_BUFFER_LAST_MULT) { + pr_debug("%s buffer len 0x%x not a multiple of 0x%x\n", + in ? "input" : "output", len, DDE_BUFFER_LAST_MULT); + if (in) + return -EINVAL; + len = round_down(len, DDE_BUFFER_LAST_MULT); + } + + /* use a single direct DDE */ + if (len <= LEN_ON_PAGE(pa)) { + ret = setup_direct_dde(dde, pa, len); + WARN_ON(ret < len); + return 0; + } + + /* use the DDL */ + for (i = 0; i < DDL_LEN_MAX && len > 0; i++) { + ret = setup_direct_dde(&ddl[i], pa, len); + buf += ret; + len -= ret; + pa = nx842_get_pa(buf); + } + + if (len > 0) { + pr_debug("0x%x total %s bytes 0x%x too many for DDL.\n", + total_len, in ? 
"input" : "output", len); + if (in) + return -EMSGSIZE; + total_len -= len; + } + setup_indirect_dde(dde, ddl, i, total_len); + + return 0; +} + +#define CSB_ERR(csb, msg, ...) \ + pr_err("ERROR: " msg " : %02x %02x %02x %02x %08x\n", \ + ##__VA_ARGS__, (csb)->flags, \ + (csb)->cs, (csb)->cc, (csb)->ce, \ + be32_to_cpu((csb)->count)) + +#define CSB_ERR_ADDR(csb, msg, ...) \ + CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__, \ + (unsigned long)be64_to_cpu((csb)->address)) + +/** + * wait_for_csb + */ +static int wait_for_csb(struct nx842_workmem *wmem, + struct coprocessor_status_block *csb) +{ + ktime_t start = wmem->start, now = ktime_get(); + ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); + + while (!(READ_ONCE(csb->flags) & CSB_V)) { + cpu_relax(); + now = ktime_get(); + if (ktime_after(now, timeout)) + break; + } + + /* hw has updated csb and output buffer */ + barrier(); + + /* check CSB flags */ + if (!(csb->flags & CSB_V)) { + CSB_ERR(csb, "CSB still not valid after %ld us, giving up", + (long)ktime_us_delta(now, start)); + return -ETIMEDOUT; + } + if (csb->flags & CSB_F) { + CSB_ERR(csb, "Invalid CSB format"); + return -EPROTO; + } + if (csb->flags & CSB_CH) { + CSB_ERR(csb, "Invalid CSB chaining state"); + return -EPROTO; + } + + /* verify CSB completion sequence is 0 */ + if (csb->cs) { + CSB_ERR(csb, "Invalid CSB completion sequence"); + return -EPROTO; + } + + /* check CSB Completion Code */ + switch (csb->cc) { + /* no error */ + case CSB_CC_SUCCESS: + break; + case CSB_CC_TPBC_GT_SPBC: + /* not an error, but the compressed data is + * larger than the uncompressed data :( + */ + break; + + /* input data errors */ + case CSB_CC_OPERAND_OVERLAP: + /* input and output buffers overlap */ + CSB_ERR(csb, "Operand Overlap error"); + return -EINVAL; + case CSB_CC_INVALID_OPERAND: + CSB_ERR(csb, "Invalid operand"); + return -EINVAL; + case CSB_CC_NOSPC: + /* output buffer too small */ + return -ENOSPC; + case CSB_CC_ABORT: + CSB_ERR(csb, "Function aborted"); + return -EINTR; + case CSB_CC_CRC_MISMATCH: + CSB_ERR(csb, "CRC mismatch"); + return -EINVAL; + case CSB_CC_TEMPL_INVALID: + CSB_ERR(csb, "Compressed data template invalid"); + return -EINVAL; + case CSB_CC_TEMPL_OVERFLOW: + CSB_ERR(csb, "Compressed data template shows data past end"); + return -EINVAL; + case CSB_CC_EXCEED_BYTE_COUNT: /* P9 or later */ + /* + * DDE byte count exceeds the limit specified in Maximum + * byte count register. 
+ */ + CSB_ERR(csb, "DDE byte count exceeds the limit"); + return -EINVAL; + + /* these should not happen */ + case CSB_CC_INVALID_ALIGN: + /* setup_ddl should have detected this */ + CSB_ERR_ADDR(csb, "Invalid alignment"); + return -EINVAL; + case CSB_CC_DATA_LENGTH: + /* setup_ddl should have detected this */ + CSB_ERR(csb, "Invalid data length"); + return -EINVAL; + case CSB_CC_WR_TRANSLATION: + case CSB_CC_TRANSLATION: + case CSB_CC_TRANSLATION_DUP1: + case CSB_CC_TRANSLATION_DUP2: + case CSB_CC_TRANSLATION_DUP3: + case CSB_CC_TRANSLATION_DUP4: + case CSB_CC_TRANSLATION_DUP5: + case CSB_CC_TRANSLATION_DUP6: + /* should not happen, we use physical addrs */ + CSB_ERR_ADDR(csb, "Translation error"); + return -EPROTO; + case CSB_CC_WR_PROTECTION: + case CSB_CC_PROTECTION: + case CSB_CC_PROTECTION_DUP1: + case CSB_CC_PROTECTION_DUP2: + case CSB_CC_PROTECTION_DUP3: + case CSB_CC_PROTECTION_DUP4: + case CSB_CC_PROTECTION_DUP5: + case CSB_CC_PROTECTION_DUP6: + /* should not happen, we use physical addrs */ + CSB_ERR_ADDR(csb, "Protection error"); + return -EPROTO; + case CSB_CC_PRIVILEGE: + /* shouldn't happen, we're in HYP mode */ + CSB_ERR(csb, "Insufficient Privilege error"); + return -EPROTO; + case CSB_CC_EXCESSIVE_DDE: + /* shouldn't happen, setup_ddl doesn't use many dde's */ + CSB_ERR(csb, "Too many DDEs in DDL"); + return -EINVAL; + case CSB_CC_TRANSPORT: + case CSB_CC_INVALID_CRB: /* P9 or later */ + /* shouldn't happen, we setup CRB correctly */ + CSB_ERR(csb, "Invalid CRB"); + return -EINVAL; + case CSB_CC_INVALID_DDE: /* P9 or later */ + /* + * shouldn't happen, setup_direct/indirect_dde creates + * DDE right + */ + CSB_ERR(csb, "Invalid DDE"); + return -EINVAL; + case CSB_CC_SEGMENTED_DDL: + /* shouldn't happen, setup_ddl creates DDL right */ + CSB_ERR(csb, "Segmented DDL error"); + return -EINVAL; + case CSB_CC_DDE_OVERFLOW: + /* shouldn't happen, setup_ddl creates DDL right */ + CSB_ERR(csb, "DDE overflow error"); + return -EINVAL; + case CSB_CC_SESSION: + /* should not happen with ICSWX */ + CSB_ERR(csb, "Session violation error"); + return -EPROTO; + case CSB_CC_CHAIN: + /* should not happen, we don't use chained CRBs */ + CSB_ERR(csb, "Chained CRB error"); + return -EPROTO; + case CSB_CC_SEQUENCE: + /* should not happen, we don't use chained CRBs */ + CSB_ERR(csb, "CRB sequence number error"); + return -EPROTO; + case CSB_CC_UNKNOWN_CODE: + CSB_ERR(csb, "Unknown subfunction code"); + return -EPROTO; + + /* hardware errors */ + case CSB_CC_RD_EXTERNAL: + case CSB_CC_RD_EXTERNAL_DUP1: + case CSB_CC_RD_EXTERNAL_DUP2: + case CSB_CC_RD_EXTERNAL_DUP3: + CSB_ERR_ADDR(csb, "Read error outside coprocessor"); + return -EPROTO; + case CSB_CC_WR_EXTERNAL: + CSB_ERR_ADDR(csb, "Write error outside coprocessor"); + return -EPROTO; + case CSB_CC_INTERNAL: + CSB_ERR(csb, "Internal error in coprocessor"); + return -EPROTO; + case CSB_CC_PROVISION: + CSB_ERR(csb, "Storage provision error"); + return -EPROTO; + case CSB_CC_HW: + CSB_ERR(csb, "Correctable hardware error"); + return -EPROTO; + case CSB_CC_HW_EXPIRED_TIMER: /* P9 or later */ + CSB_ERR(csb, "Job did not finish within allowed time"); + return -EPROTO; + + default: + CSB_ERR(csb, "Invalid CC %d", csb->cc); + return -EPROTO; + } + + /* check Completion Extension state */ + if (csb->ce & CSB_CE_TERMINATION) { + CSB_ERR(csb, "CSB request was terminated"); + return -EPROTO; + } + if (csb->ce & CSB_CE_INCOMPLETE) { + CSB_ERR(csb, "CSB request not complete"); + return -EPROTO; + } + if (!(csb->ce & CSB_CE_TPBC)) { + CSB_ERR(csb, "TPBC not 
provided, unknown target length"); + return -EPROTO; + } + + /* successful completion */ + pr_debug_ratelimited("Processed %u bytes in %lu us\n", + be32_to_cpu(csb->count), + (unsigned long)ktime_us_delta(now, start)); + + return 0; +} + +static int nx842_config_crb(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int outlen, + struct nx842_workmem *wmem) +{ + struct coprocessor_request_block *crb; + struct coprocessor_status_block *csb; + u64 csb_addr; + int ret; + + crb = &wmem->crb; + csb = &crb->csb; + + /* Clear any previous values */ + memset(crb, 0, sizeof(*crb)); + + /* set up DDLs */ + ret = setup_ddl(&crb->source, wmem->ddl_in, + (unsigned char *)in, inlen, true); + if (ret) + return ret; + + ret = setup_ddl(&crb->target, wmem->ddl_out, + out, outlen, false); + if (ret) + return ret; + + /* set up CRB's CSB addr */ + csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS; + csb_addr |= CRB_CSB_AT; /* Addrs are phys */ + crb->csb_addr = cpu_to_be64(csb_addr); + + return 0; +} + +/** + * nx842_exec_icswx - compress/decompress data using the 842 algorithm + * + * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems. + * This compresses or decompresses the provided input buffer into the provided + * output buffer. + * + * Upon return from this function @outlen contains the length of the + * output data. If there is an error then @outlen will be 0 and an + * error will be specified by the return code from this function. + * + * The @workmem buffer should only be used by one function call at a time. + * + * @in: input buffer pointer + * @inlen: input buffer size + * @out: output buffer pointer + * @outlenp: output buffer size pointer + * @workmem: working memory buffer pointer, size determined by + * nx842_powernv_driver.workmem_size + * @fc: function code, see CCW Function Codes in nx-842.h + * + * Returns: + * 0 Success, output of length @outlenp stored in the buffer at @out + * -ENODEV Hardware unavailable + * -ENOSPC Output buffer is too small + * -EMSGSIZE Input buffer too large + * -EINVAL buffer constraints do not fit nx842_constraints + * -EPROTO hardware error during operation + * -ETIMEDOUT hardware did not complete operation in reasonable time + * -EINTR operation was aborted + */ +static int nx842_exec_icswx(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlenp, + void *workmem, int fc) +{ + struct coprocessor_request_block *crb; + struct coprocessor_status_block *csb; + struct nx842_workmem *wmem; + int ret; + u32 ccw; + unsigned int outlen = *outlenp; + + wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN); + + *outlenp = 0; + + /* shouldn't happen, we don't load without a coproc */ + if (!nx842_ct) { + pr_err_ratelimited("coprocessor CT is 0"); + return -ENODEV; + } + + ret = nx842_config_crb(in, inlen, out, outlen, wmem); + if (ret) + return ret; + + crb = &wmem->crb; + csb = &crb->csb; + + /* set up CCW */ + ccw = 0; + ccw = SET_FIELD(CCW_CT, ccw, nx842_ct); + ccw = SET_FIELD(CCW_CI_842, ccw, 0); /* use 0 for hw auto-selection */ + ccw = SET_FIELD(CCW_FC_842, ccw, fc); + + wmem->start = ktime_get(); + + /* do ICSWX */ + ret = icswx(cpu_to_be32(ccw), crb); + + pr_debug_ratelimited("icswx CR %x ccw %x crb->ccw %x\n", ret, + (unsigned int)ccw, + (unsigned int)be32_to_cpu(crb->ccw)); + + /* + * NX842 coprocessor sets 3rd bit in CR register with XER[S0]. + * XER[S0] is the integer summary overflow bit, which has nothing + * to do with NX. Since this bit can be set with other return values, + * mask this bit.
+ */ + ret &= ~ICSWX_XERS0; + + switch (ret) { + case ICSWX_INITIATED: + ret = wait_for_csb(wmem, csb); + break; + case ICSWX_BUSY: + pr_debug_ratelimited("842 Coprocessor busy\n"); + ret = -EBUSY; + break; + case ICSWX_REJECTED: + pr_err_ratelimited("ICSWX rejected\n"); + ret = -EPROTO; + break; + } + + if (!ret) + *outlenp = be32_to_cpu(csb->count); + + return ret; +} + +/** + * nx842_exec_vas - compress/decompress data using the 842 algorithm + * + * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems. + * This compresses or decompresses the provided input buffer into the provided + * output buffer. + * + * Upon return from this function @outlen contains the length of the + * output data. If there is an error then @outlen will be 0 and an + * error will be specified by the return code from this function. + * + * The @workmem buffer should only be used by one function call at a time. + * + * @in: input buffer pointer + * @inlen: input buffer size + * @out: output buffer pointer + * @outlenp: output buffer size pointer + * @workmem: working memory buffer pointer, size determined by + * nx842_powernv_driver.workmem_size + * @fc: function code, see CCW Function Codes in nx-842.h + * + * Returns: + * 0 Success, output of length @outlenp stored in the buffer + * at @out + * -ENODEV Hardware unavailable + * -ENOSPC Output buffer is too small + * -EMSGSIZE Input buffer too large + * -EINVAL buffer constraints do not fit nx842_constraints + * -EPROTO hardware error during operation + * -ETIMEDOUT hardware did not complete operation in reasonable time + * -EINTR operation was aborted + */ +static int nx842_exec_vas(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlenp, + void *workmem, int fc) +{ + struct coprocessor_request_block *crb; + struct coprocessor_status_block *csb; + struct nx842_workmem *wmem; + struct vas_window *txwin; + int ret, i = 0; + u32 ccw; + unsigned int outlen = *outlenp; + + wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN); + + *outlenp = 0; + + crb = &wmem->crb; + csb = &crb->csb; + + ret = nx842_config_crb(in, inlen, out, outlen, wmem); + if (ret) + return ret; + + ccw = 0; + ccw = SET_FIELD(CCW_FC_842, ccw, fc); + crb->ccw = cpu_to_be32(ccw); + + do { + wmem->start = ktime_get(); + preempt_disable(); + txwin = this_cpu_read(cpu_txwin); + + /* + * VAS copy CRB into L2 cache. Refer <asm/vas.h>. + * @crb and @offset. + */ + vas_copy_crb(crb, 0); + + /* + * VAS paste previously copied CRB to NX. + * @txwin, @offset and @last (must be true). + */ + ret = vas_paste_crb(txwin, 0, 1); + preempt_enable(); + /* + * Retry copy/paste function for VAS failures. + */ + } while (ret && (i++ < VAS_RETRIES)); + + if (ret) { + pr_err_ratelimited("VAS copy/paste failed\n"); + return ret; + } + + ret = wait_for_csb(wmem, csb); + if (!ret) + *outlenp = be32_to_cpu(csb->count); + + return ret; +} + +/** + * nx842_powernv_compress - Compress data using the 842 algorithm + * + * Compression provided by the NX842 coprocessor on IBM PowerNV systems. + * The input buffer is compressed and the result is stored in the + * provided output buffer. + * + * Upon return from this function @outlen contains the length of the + * compressed data. If there is an error then @outlen will be 0 and an + * error will be specified by the return code from this function.
+ * + * @in: input buffer pointer + * @inlen: input buffer size + * @out: output buffer pointer + * @outlenp: output buffer size pointer + * @workmem: working memory buffer pointer, size determined by + * nx842_powernv_driver.workmem_size + * + * Returns: see @nx842_powernv_exec() + */ +static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlenp, + void *wmem) +{ + return nx842_powernv_exec(in, inlen, out, outlenp, + wmem, CCW_FC_842_COMP_CRC); +} + +/** + * nx842_powernv_decompress - Decompress data using the 842 algorithm + * + * Decompression provided by the NX842 coprocessor on IBM PowerNV systems. + * The input buffer is decompressed and the result is stored in the + * provided output buffer. + * + * Upon return from this function @outlen contains the length of the + * decompressed data. If there is an error then @outlen will be 0 and an + * error will be specified by the return code from this function. + * + * @in: input buffer pointer + * @inlen: input buffer size + * @out: output buffer pointer + * @outlenp: output buffer size pointer + * @workmem: working memory buffer pointer, size determined by + * nx842_powernv_driver.workmem_size + * + * Returns: see @nx842_powernv_exec() + */ +static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen, + unsigned char *out, unsigned int *outlenp, + void *wmem) +{ + return nx842_powernv_exec(in, inlen, out, outlenp, + wmem, CCW_FC_842_DECOMP_CRC); +} + +static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc, + int chipid) +{ + coproc->chip_id = chipid; + INIT_LIST_HEAD(&coproc->list); + list_add(&coproc->list, &nx842_coprocs); +} + +static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc) +{ + struct vas_window *txwin = NULL; + struct vas_tx_win_attr txattr; + + /* + * Kernel requests will be high priority. So open send + * windows only for high priority RxFIFO entries. + */ + vas_init_tx_win_attr(&txattr, coproc->ct); + txattr.lpid = 0; /* lpid is 0 for kernel requests */ + txattr.pid = 0; /* pid is 0 for kernel requests */ + + /* + * Open a VAS send window which is used to send request to NX. + */ + txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr); + if (IS_ERR(txwin)) + pr_err("ibm,nx-842: Cannot open TX window: %ld\n", + PTR_ERR(txwin)); + + return txwin; +} + +/* + * Identify chip ID for each CPU, open send window for the corresponding NX + * engine and save txwin in percpu cpu_txwin. + * cpu_txwin is used in copy/paste operation for each compression / + * decompression request. + */ +static int nx842_open_percpu_txwins(void) +{ + struct nx842_coproc *coproc, *n; + unsigned int i, chip_id; + + for_each_possible_cpu(i) { + struct vas_window *txwin = NULL; + + chip_id = cpu_to_chip_id(i); + + list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { + /* + * Kernel requests use only high priority FIFOs. So + * open send windows for these FIFOs.
+ */ + + if (coproc->ct != VAS_COP_TYPE_842_HIPRI) + continue; + + if (coproc->chip_id == chip_id) { + txwin = nx842_alloc_txwin(coproc); + if (IS_ERR(txwin)) + return PTR_ERR(txwin); + + per_cpu(cpu_txwin, i) = txwin; + break; + } + } + + if (!per_cpu(cpu_txwin, i)) { + /* shouldn't happen, each chip should have an NX engine */ + pr_err("NX engine is not available for CPU %d\n", i); + return -EINVAL; + } + } + + return 0; +} + +static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, + int vasid, int *ct) +{ + struct vas_window *rxwin = NULL; + struct vas_rx_win_attr rxattr; + struct nx842_coproc *coproc; + u32 lpid, pid, tid, fifo_size; + u64 rx_fifo; + const char *priority; + int ret; + + ret = of_property_read_u64(dn, "rx-fifo-address", &rx_fifo); + if (ret) { + pr_err("Missing rx-fifo-address property\n"); + return ret; + } + + ret = of_property_read_u32(dn, "rx-fifo-size", &fifo_size); + if (ret) { + pr_err("Missing rx-fifo-size property\n"); + return ret; + } + + ret = of_property_read_u32(dn, "lpid", &lpid); + if (ret) { + pr_err("Missing lpid property\n"); + return ret; + } + + ret = of_property_read_u32(dn, "pid", &pid); + if (ret) { + pr_err("Missing pid property\n"); + return ret; + } + + ret = of_property_read_u32(dn, "tid", &tid); + if (ret) { + pr_err("Missing tid property\n"); + return ret; + } + + ret = of_property_read_string(dn, "priority", &priority); + if (ret) { + pr_err("Missing priority property\n"); + return ret; + } + + coproc = kzalloc(sizeof(*coproc), GFP_KERNEL); + if (!coproc) + return -ENOMEM; + + if (!strcmp(priority, "High")) + coproc->ct = VAS_COP_TYPE_842_HIPRI; + else if (!strcmp(priority, "Normal")) + coproc->ct = VAS_COP_TYPE_842; + else { + pr_err("Invalid RxFIFO priority value\n"); + ret = -EINVAL; + goto err_out; + } + + vas_init_rx_win_attr(&rxattr, coproc->ct); + rxattr.rx_fifo = (void *)rx_fifo; + rxattr.rx_fifo_size = fifo_size; + rxattr.lnotify_lpid = lpid; + rxattr.lnotify_pid = pid; + rxattr.lnotify_tid = tid; + /* + * Maximum RX window credits cannot be more than #CRBs in + * RxFIFO. Otherwise, the system can checkstop if RxFIFO overruns. + */ + rxattr.wcreds_max = fifo_size / CRB_SIZE; + + /* + * Open a VAS receive window which is used to configure RxFIFO + * for NX. + */ + rxwin = vas_rx_win_open(vasid, coproc->ct, &rxattr); + if (IS_ERR(rxwin)) { + ret = PTR_ERR(rxwin); + pr_err("setting RxFIFO with VAS failed: %d\n", + ret); + goto err_out; + } + + coproc->vas.rxwin = rxwin; + coproc->vas.id = vasid; + nx842_add_coprocs_list(coproc, chip_id); + + /* + * (lpid, pid, tid) combination has to be unique for each + * coprocessor instance in the system. So to make it + * unique, skiboot uses the coprocessor type such as 842 or + * GZIP for pid and provides this value to the kernel in the pid + * device-tree property.
+ */ + *ct = pid; + + return 0; + +err_out: + kfree(coproc); + return ret; +} + + +static int __init nx842_powernv_probe_vas(struct device_node *pn) +{ + struct device_node *dn; + int chip_id, vasid, ret = 0; + int nx_fifo_found = 0; + int uninitialized_var(ct); + + chip_id = of_get_ibm_chip_id(pn); + if (chip_id < 0) { + pr_err("ibm,chip-id missing\n"); + return -EINVAL; + } + + vasid = chip_to_vas_id(chip_id); + if (vasid < 0) { + pr_err("Unable to map chip_id %d to vasid\n", chip_id); + return -EINVAL; + } + + for_each_child_of_node(pn, dn) { + if (of_device_is_compatible(dn, "ibm,p9-nx-842")) { + ret = vas_cfg_coproc_info(dn, chip_id, vasid, &ct); + if (ret) { + of_node_put(dn); + return ret; + } + nx_fifo_found++; + } + } + + if (!nx_fifo_found) { + pr_err("NX842 FIFO nodes are missing\n"); + return -EINVAL; + } + + /* + * Initialize NX instance for both high and normal priority FIFOs. + */ + if (opal_check_token(OPAL_NX_COPROC_INIT)) { + ret = opal_nx_coproc_init(chip_id, ct); + if (ret) { + pr_err("Failed to initialize NX for chip(%d): %d\n", + chip_id, ret); + ret = opal_error_code(ret); + } + } else + pr_warn("Firmware doesn't support NX initialization\n"); + + return ret; +} + +static int __init nx842_powernv_probe(struct device_node *dn) +{ + struct nx842_coproc *coproc; + unsigned int ct, ci; + int chip_id; + + chip_id = of_get_ibm_chip_id(dn); + if (chip_id < 0) { + pr_err("ibm,chip-id missing\n"); + return -EINVAL; + } + + if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) { + pr_err("ibm,842-coprocessor-type missing\n"); + return -EINVAL; + } + + if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) { + pr_err("ibm,842-coprocessor-instance missing\n"); + return -EINVAL; + } + + coproc = kzalloc(sizeof(*coproc), GFP_KERNEL); + if (!coproc) + return -ENOMEM; + + coproc->ct = ct; + coproc->ci = ci; + nx842_add_coprocs_list(coproc, chip_id); + + pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci); + + if (!nx842_ct) + nx842_ct = ct; + else if (nx842_ct != ct) + pr_err("NX842 chip %d, CT %d != first found CT %d\n", + chip_id, ct, nx842_ct); + + return 0; +} + +static void nx842_delete_coprocs(void) +{ + struct nx842_coproc *coproc, *n; + struct vas_window *txwin; + int i; + + /* + * close percpu txwins that are opened for the corresponding coproc. 
+ */ + for_each_possible_cpu(i) { + txwin = per_cpu(cpu_txwin, i); + if (txwin) + vas_win_close(txwin); + + per_cpu(cpu_txwin, i) = 0; + } + + list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { + if (coproc->vas.rxwin) + vas_win_close(coproc->vas.rxwin); + + list_del(&coproc->list); + kfree(coproc); + } +} + +static struct nx842_constraints nx842_powernv_constraints = { + .alignment = DDE_BUFFER_ALIGN, + .multiple = DDE_BUFFER_LAST_MULT, + .minimum = DDE_BUFFER_LAST_MULT, + .maximum = (DDL_LEN_MAX - 1) * PAGE_SIZE, +}; + +static struct nx842_driver nx842_powernv_driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + .workmem_size = sizeof(struct nx842_workmem), + .constraints = &nx842_powernv_constraints, + .compress = nx842_powernv_compress, + .decompress = nx842_powernv_decompress, +}; + +static int nx842_powernv_crypto_init(struct crypto_tfm *tfm) +{ + return nx842_crypto_init(tfm, &nx842_powernv_driver); +} + +static struct crypto_alg nx842_powernv_alg = { + .cra_name = "842", + .cra_driver_name = "842-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, + .cra_ctxsize = sizeof(struct nx842_crypto_ctx), + .cra_module = THIS_MODULE, + .cra_init = nx842_powernv_crypto_init, + .cra_exit = nx842_crypto_exit, + .cra_u = { .compress = { + .coa_compress = nx842_crypto_compress, + .coa_decompress = nx842_crypto_decompress } } +}; + +static __init int nx842_powernv_init(void) +{ + struct device_node *dn; + int ret; + + /* verify workmem size/align restrictions */ + BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN); + BUILD_BUG_ON(CRB_ALIGN % DDE_ALIGN); + BUILD_BUG_ON(CRB_SIZE % DDE_ALIGN); + /* verify buffer size/align restrictions */ + BUILD_BUG_ON(PAGE_SIZE % DDE_BUFFER_ALIGN); + BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT); + BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT); + + for_each_compatible_node(dn, NULL, "ibm,power9-nx") { + ret = nx842_powernv_probe_vas(dn); + if (ret) { + nx842_delete_coprocs(); + of_node_put(dn); + return ret; + } + } + + if (list_empty(&nx842_coprocs)) { + for_each_compatible_node(dn, NULL, "ibm,power-nx") + nx842_powernv_probe(dn); + + if (!nx842_ct) + return -ENODEV; + + nx842_powernv_exec = nx842_exec_icswx; + } else { + ret = nx842_open_percpu_txwins(); + if (ret) { + nx842_delete_coprocs(); + return ret; + } + + nx842_powernv_exec = nx842_exec_vas; + } + + ret = crypto_register_alg(&nx842_powernv_alg); + if (ret) { + nx842_delete_coprocs(); + return ret; + } + + return 0; +} +module_init(nx842_powernv_init); + +static void __exit nx842_powernv_exit(void) +{ + crypto_unregister_alg(&nx842_powernv_alg); + + nx842_delete_coprocs(); +} +module_exit(nx842_powernv_exit); -- cgit v1.2.3-59-g8ed1b From 4aebf3ce26ca2128433b615cd2535c22b03c8fa3 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Fri, 17 Apr 2020 02:09:29 -0700 Subject: crypto/nx: Make enable code generic to add new GZIP compression type Make setup and enable code generic to support new GZIP compression type. Changed nx842 reference to nx and moved some code to new functions. Functionality is not changed except sparse warning fix - setting NULL instead of 0 for per_cpu send window in nx_delete_coprocs(). 
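For context, the execution backend this series keeps generic is selected once at module init through a single function pointer, so the compress/decompress entry points never need to know whether icswx (P8) or VAS copy/paste (P9) sits underneath. A minimal illustrative sketch (not the literal patch code; the have_vas flag is hypothetical):

	static int (*nx842_powernv_exec)(const unsigned char *in,
			unsigned int inlen, unsigned char *out,
			unsigned int *outlenp, void *workmem, int fc);

	static void __init nx_select_backend(bool have_vas)
	{
		/* P9 and later submit CRBs via VAS copy/paste;
		 * P8 falls back to the icswx instruction.
		 */
		nx842_powernv_exec = have_vas ? nx842_exec_vas
					      : nx842_exec_icswx;
	}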
Signed-off-by: Haren Myneni Acked-by: Herbert Xu Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587114569.2275.1126.camel@hbabu-laptop --- drivers/crypto/nx/nx-common-powernv.c | 161 +++++++++++++++++++++------------- 1 file changed, 101 insertions(+), 60 deletions(-) diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c index f42881fbed2f..82dfa60ef3f4 100644 --- a/drivers/crypto/nx/nx-common-powernv.c +++ b/drivers/crypto/nx/nx-common-powernv.c @@ -40,9 +40,9 @@ struct nx842_workmem { char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */ } __packed __aligned(WORKMEM_ALIGN); -struct nx842_coproc { +struct nx_coproc { unsigned int chip_id; - unsigned int ct; + unsigned int ct; /* Can be 842 or GZIP high/normal*/ unsigned int ci; /* Coprocessor instance, used with icswx */ struct { struct vas_window *rxwin; @@ -58,9 +58,15 @@ struct nx842_coproc { static DEFINE_PER_CPU(struct vas_window *, cpu_txwin); /* no cpu hotplug on powernv, so this list never changes after init */ -static LIST_HEAD(nx842_coprocs); +static LIST_HEAD(nx_coprocs); static unsigned int nx842_ct; /* used in icswx function */ +/* + * Using same values as in skiboot or coprocessor type representing + * in NX workbook. + */ +#define NX_CT_842 (3) + static int (*nx842_powernv_exec)(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlenp, void *workmem, int fc); @@ -666,15 +672,15 @@ static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen, wmem, CCW_FC_842_DECOMP_CRC); } -static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc, +static inline void nx_add_coprocs_list(struct nx_coproc *coproc, int chipid) { coproc->chip_id = chipid; INIT_LIST_HEAD(&coproc->list); - list_add(&coproc->list, &nx842_coprocs); + list_add(&coproc->list, &nx_coprocs); } -static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc) +static struct vas_window *nx_alloc_txwin(struct nx_coproc *coproc) { struct vas_window *txwin = NULL; struct vas_tx_win_attr txattr; @@ -704,9 +710,9 @@ static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc) * cpu_txwin is used in copy/paste operation for each compression / * decompression request. */ -static int nx842_open_percpu_txwins(void) +static int nx_open_percpu_txwins(void) { - struct nx842_coproc *coproc, *n; + struct nx_coproc *coproc, *n; unsigned int i, chip_id; for_each_possible_cpu(i) { @@ -714,17 +720,18 @@ static int nx842_open_percpu_txwins(void) chip_id = cpu_to_chip_id(i); - list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { + list_for_each_entry_safe(coproc, n, &nx_coprocs, list) { /* * Kernel requests use only high priority FIFOs. So * open send windows for these FIFOs. + * GZIP is not supported in kernel right now. 
*/ if (coproc->ct != VAS_COP_TYPE_842_HIPRI) continue; if (coproc->chip_id == chip_id) { - txwin = nx842_alloc_txwin(coproc); + txwin = nx_alloc_txwin(coproc); if (IS_ERR(txwin)) return PTR_ERR(txwin); @@ -743,13 +750,28 @@ static int nx842_open_percpu_txwins(void) return 0; } +static int __init nx_set_ct(struct nx_coproc *coproc, const char *priority, + int high, int normal) +{ + if (!strcmp(priority, "High")) + coproc->ct = high; + else if (!strcmp(priority, "Normal")) + coproc->ct = normal; + else { + pr_err("Invalid RxFIFO priority value\n"); + return -EINVAL; + } + + return 0; +} + static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, - int vasid, int *ct) + int vasid, int type, int *ct) { struct vas_window *rxwin = NULL; struct vas_rx_win_attr rxattr; - struct nx842_coproc *coproc; u32 lpid, pid, tid, fifo_size; + struct nx_coproc *coproc; u64 rx_fifo; const char *priority; int ret; @@ -794,15 +816,12 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, if (!coproc) return -ENOMEM; - if (!strcmp(priority, "High")) - coproc->ct = VAS_COP_TYPE_842_HIPRI; - else if (!strcmp(priority, "Normal")) - coproc->ct = VAS_COP_TYPE_842; - else { - pr_err("Invalid RxFIFO priority value\n"); - ret = -EINVAL; + if (type == NX_CT_842) + ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_842_HIPRI, + VAS_COP_TYPE_842); + + if (ret) goto err_out; - } vas_init_rx_win_attr(&rxattr, coproc->ct); rxattr.rx_fifo = (void *)rx_fifo; @@ -830,7 +849,7 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, coproc->vas.rxwin = rxwin; coproc->vas.id = vasid; - nx842_add_coprocs_list(coproc, chip_id); + nx_add_coprocs_list(coproc, chip_id); /* * (lpid, pid, tid) combination has to be unique for each @@ -848,13 +867,43 @@ err_out: return ret; } +static int __init nx_coproc_init(int chip_id, int ct_842) +{ + int ret = 0; -static int __init nx842_powernv_probe_vas(struct device_node *pn) + if (opal_check_token(OPAL_NX_COPROC_INIT)) { + ret = opal_nx_coproc_init(chip_id, ct_842); + if (ret) { + ret = opal_error_code(ret); + pr_err("Failed to initialize NX for chip(%d): %d\n", + chip_id, ret); + } + } else + pr_warn("Firmware doesn't support NX initialization\n"); + + return ret; +} + +static int __init find_nx_device_tree(struct device_node *dn, int chip_id, + int vasid, int type, char *devname, + int *ct) +{ + int ret = 0; + + if (of_device_is_compatible(dn, devname)) { + ret = vas_cfg_coproc_info(dn, chip_id, vasid, type, ct); + if (ret) + of_node_put(dn); + } + + return ret; +} + +static int __init nx_powernv_probe_vas(struct device_node *pn) { - struct device_node *dn; int chip_id, vasid, ret = 0; - int nx_fifo_found = 0; - int uninitialized_var(ct); + struct device_node *dn; + int ct_842 = 0; chip_id = of_get_ibm_chip_id(pn); if (chip_id < 0) { @@ -869,17 +918,13 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn) } for_each_child_of_node(pn, dn) { - if (of_device_is_compatible(dn, "ibm,p9-nx-842")) { - ret = vas_cfg_coproc_info(dn, chip_id, vasid, &ct); - if (ret) { - of_node_put(dn); - return ret; - } - nx_fifo_found++; - } + ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_842, + "ibm,p9-nx-842", &ct_842); + if (ret) + return ret; } - if (!nx_fifo_found) { + if (!ct_842) { pr_err("NX842 FIFO nodes are missing\n"); return -EINVAL; } @@ -887,22 +932,14 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn) /* * Initialize NX instance for both high and normal priority FIFOs. 
*/ - if (opal_check_token(OPAL_NX_COPROC_INIT)) { - ret = opal_nx_coproc_init(chip_id, ct); - if (ret) { - pr_err("Failed to initialize NX for chip(%d): %d\n", - chip_id, ret); - ret = opal_error_code(ret); - } - } else - pr_warn("Firmware doesn't support NX initialization\n"); + ret = nx_coproc_init(chip_id, ct_842); return ret; } static int __init nx842_powernv_probe(struct device_node *dn) { - struct nx842_coproc *coproc; + struct nx_coproc *coproc; unsigned int ct, ci; int chip_id; @@ -928,7 +965,7 @@ static int __init nx842_powernv_probe(struct device_node *dn) coproc->ct = ct; coproc->ci = ci; - nx842_add_coprocs_list(coproc, chip_id); + nx_add_coprocs_list(coproc, chip_id); pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci); @@ -941,9 +978,9 @@ static int __init nx842_powernv_probe(struct device_node *dn) return 0; } -static void nx842_delete_coprocs(void) +static void nx_delete_coprocs(void) { - struct nx842_coproc *coproc, *n; + struct nx_coproc *coproc, *n; struct vas_window *txwin; int i; @@ -955,10 +992,10 @@ static void nx842_delete_coprocs(void) if (txwin) vas_win_close(txwin); - per_cpu(cpu_txwin, i) = 0; + per_cpu(cpu_txwin, i) = NULL; } - list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { + list_for_each_entry_safe(coproc, n, &nx_coprocs, list) { if (coproc->vas.rxwin) vas_win_close(coproc->vas.rxwin); @@ -1002,7 +1039,7 @@ static struct crypto_alg nx842_powernv_alg = { .coa_decompress = nx842_crypto_decompress } } }; -static __init int nx842_powernv_init(void) +static __init int nx_compress_powernv_init(void) { struct device_node *dn; int ret; @@ -1017,15 +1054,15 @@ static __init int nx842_powernv_init(void) BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT); for_each_compatible_node(dn, NULL, "ibm,power9-nx") { - ret = nx842_powernv_probe_vas(dn); + ret = nx_powernv_probe_vas(dn); if (ret) { - nx842_delete_coprocs(); + nx_delete_coprocs(); of_node_put(dn); return ret; } } - if (list_empty(&nx842_coprocs)) { + if (list_empty(&nx_coprocs)) { for_each_compatible_node(dn, NULL, "ibm,power-nx") nx842_powernv_probe(dn); @@ -1034,9 +1071,13 @@ static __init int nx842_powernv_init(void) nx842_powernv_exec = nx842_exec_icswx; } else { - ret = nx842_open_percpu_txwins(); + /* + * GZIP is not supported in kernel right now. + * So open tx windows only for 842. + */ + ret = nx_open_percpu_txwins(); if (ret) { - nx842_delete_coprocs(); + nx_delete_coprocs(); return ret; } @@ -1045,18 +1086,18 @@ static __init int nx842_powernv_init(void) ret = crypto_register_alg(&nx842_powernv_alg); if (ret) { - nx842_delete_coprocs(); + nx_delete_coprocs(); return ret; } return 0; } -module_init(nx842_powernv_init); +module_init(nx_compress_powernv_init); -static void __exit nx842_powernv_exit(void) +static void __exit nx_compress_powernv_exit(void) { crypto_unregister_alg(&nx842_powernv_alg); - nx842_delete_coprocs(); + nx_delete_coprocs(); } -module_exit(nx842_powernv_exit); +module_exit(nx_compress_powernv_exit); -- cgit v1.2.3-59-g8ed1b From 1af11ae225350a92a1e13a4c27c215befced20af Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Fri, 17 Apr 2020 02:10:24 -0700 Subject: crypto/nx: Enable and setup GZIP compression type Changes to probe GZIP device-tree nodes, open RX windows and setup GZIP compression type. No plans to provide GZIP usage in kernel right now, but this patch enables GZIP for user space usage. 
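As a rough userspace-side illustration of what this patch enables, an application can probe for the device node the driver exposes (a hypothetical helper; real usage continues with the VAS_TX_WIN_OPEN ioctl described in the documentation patch later in this series):

	#include <fcntl.h>
	#include <unistd.h>

	/* Return 1 if the NX GZIP userspace path is available. */
	static int nx_gzip_available(void)
	{
		int fd = open("/dev/crypto/nx-gzip", O_RDWR);

		if (fd < 0)
			return 0;	/* driver absent or GZIP not enabled */
		close(fd);
		return 1;
	}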
Signed-off-by: Haren Myneni Acked-by: Herbert Xu Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587114624.2275.1129.camel@hbabu-laptop --- drivers/crypto/nx/nx-common-powernv.c | 46 ++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c index 82dfa60ef3f4..651d2860de0d 100644 --- a/drivers/crypto/nx/nx-common-powernv.c +++ b/drivers/crypto/nx/nx-common-powernv.c @@ -65,6 +65,7 @@ static unsigned int nx842_ct; /* used in icswx function */ * Using same values as in skiboot or coprocessor type representing * in NX workbook. */ +#define NX_CT_GZIP (2) /* on P9 and later */ #define NX_CT_842 (3) static int (*nx842_powernv_exec)(const unsigned char *in, @@ -819,6 +820,9 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, if (type == NX_CT_842) ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_842_HIPRI, VAS_COP_TYPE_842); + else if (type == NX_CT_GZIP) + ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_GZIP_HIPRI, + VAS_COP_TYPE_GZIP); if (ret) goto err_out; @@ -867,12 +871,16 @@ err_out: return ret; } -static int __init nx_coproc_init(int chip_id, int ct_842) +static int __init nx_coproc_init(int chip_id, int ct_842, int ct_gzip) { int ret = 0; if (opal_check_token(OPAL_NX_COPROC_INIT)) { ret = opal_nx_coproc_init(chip_id, ct_842); + + if (!ret) + ret = opal_nx_coproc_init(chip_id, ct_gzip); + if (ret) { ret = opal_error_code(ret); pr_err("Failed to initialize NX for chip(%d): %d\n", @@ -902,8 +910,8 @@ static int __init find_nx_device_tree(struct device_node *dn, int chip_id, static int __init nx_powernv_probe_vas(struct device_node *pn) { int chip_id, vasid, ret = 0; + int ct_842 = 0, ct_gzip = 0; struct device_node *dn; - int ct_842 = 0; chip_id = of_get_ibm_chip_id(pn); if (chip_id < 0) { @@ -920,19 +928,24 @@ static int __init nx_powernv_probe_vas(struct device_node *pn) for_each_child_of_node(pn, dn) { ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_842, "ibm,p9-nx-842", &ct_842); + + if (!ret) + ret = find_nx_device_tree(dn, chip_id, vasid, + NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip); + if (ret) return ret; } - if (!ct_842) { - pr_err("NX842 FIFO nodes are missing\n"); + if (!ct_842 || !ct_gzip) { + pr_err("NX FIFO nodes are missing\n"); return -EINVAL; } /* * Initialize NX instance for both high and normal priority FIFOs. */ - ret = nx_coproc_init(chip_id, ct_842); + ret = nx_coproc_init(chip_id, ct_842, ct_gzip); return ret; } @@ -1071,11 +1084,23 @@ static __init int nx_compress_powernv_init(void) nx842_powernv_exec = nx842_exec_icswx; } else { + /* + * Register VAS user space API for NX GZIP so + * that user space can use GZIP engine. + * Using high FIFO priority for kernel requests and + * normal FIFO priority is assigned for userspace. + * 842 compression is supported only in kernel. + */ + ret = vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP, + "nx-gzip"); + /* * GZIP is not supported in kernel right now. * So open tx windows only for 842. */ - ret = nx_open_percpu_txwins(); + if (!ret) + ret = nx_open_percpu_txwins(); + if (ret) { nx_delete_coprocs(); return ret; @@ -1096,6 +1121,15 @@ module_init(nx_compress_powernv_init); static void __exit nx_compress_powernv_exit(void) { + /* + * GZIP engine is supported only in power9 or later and nx842_ct + * is used on power8 (icswx). + * VAS API for NX GZIP is registered during init for user space + * use. So delete this API use for GZIP engine. 
+ */ + if (!nx842_ct) + vas_unregister_coproc_api(); + crypto_unregister_alg(&nx842_powernv_alg); + nx_delete_coprocs(); -- cgit v1.2.3-59-g8ed1b From 040b00acec4bbbed7493fd64829e74a055075fb2 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Fri, 17 Apr 2020 02:11:14 -0700 Subject: crypto/nx: Remove 'pid' in vas_tx_win_attr struct When a window is opened, a pid reference is taken for user space windows; this is not needed for kernel windows. So remove 'pid' from the vas_tx_win_attr struct. Signed-off-by: Haren Myneni Acked-by: Herbert Xu Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587114674.2275.1132.camel@hbabu-laptop --- arch/powerpc/include/asm/vas.h | 1 - drivers/crypto/nx/nx-common-powernv.c | 1 - 2 files changed, 2 deletions(-) diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h index 6e427bc29b3a..e33f80b0ea81 100644 --- a/arch/powerpc/include/asm/vas.h +++ b/arch/powerpc/include/asm/vas.h @@ -86,7 +86,6 @@ struct vas_tx_win_attr { int wcreds_max; int lpid; int pidr; /* hardware PID (from SPRN_PID) */ - int pid; /* linux process id */ int pswid; int rsvd_txbuf_count; int tc_mode; diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c index 651d2860de0d..13c65deda8e9 100644 --- a/drivers/crypto/nx/nx-common-powernv.c +++ b/drivers/crypto/nx/nx-common-powernv.c @@ -692,7 +692,6 @@ static struct vas_window *nx_alloc_txwin(struct nx_coproc *coproc) */ vas_init_tx_win_attr(&txattr, coproc->ct); txattr.lpid = 0; /* lpid is 0 for kernel requests */ - txattr.pid = 0; /* pid is 0 for kernel requests */ /* * Open a VAS send window which is used to send request to NX. -- cgit v1.2.3-59-g8ed1b From c12e38b1d52e995a0efe6d011873f57e04b80b89 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Fri, 17 Apr 2020 02:11:55 -0700 Subject: Documentation/powerpc: VAS API Power9 introduced the Virtual Accelerator Switchboard (VAS), which allows userspace to communicate with the Nest Accelerator (NX) directly. But the kernel has to establish the channel to NX for userspace. This document describes the user space API that applications can use to establish that communication channel. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Haren Myneni Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1587114715.2275.1135.camel@hbabu-laptop --- Documentation/powerpc/index.rst | 1 + Documentation/powerpc/vas-api.rst | 292 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 293 insertions(+) create mode 100644 Documentation/powerpc/vas-api.rst diff --git a/Documentation/powerpc/index.rst b/Documentation/powerpc/index.rst index 0d45f0fc8e57..afe2d5e54db6 100644 --- a/Documentation/powerpc/index.rst +++ b/Documentation/powerpc/index.rst @@ -30,6 +30,7 @@ powerpc syscall64-abi transactional_memory ultravisor + vas-api .. only:: subproject and html diff --git a/Documentation/powerpc/vas-api.rst b/Documentation/powerpc/vas-api.rst new file mode 100644 index 000000000000..1217c2f1595e --- /dev/null +++ b/Documentation/powerpc/vas-api.rst @@ -0,0 +1,292 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. _VAS-API: + +=================================================== +Virtual Accelerator Switchboard (VAS) userspace API +=================================================== + +Introduction +============ + +The Power9 processor introduced the Virtual Accelerator Switchboard (VAS), which +allows both userspace and the kernel to communicate with a co-processor +(hardware accelerator) referred to as the Nest Accelerator (NX).
The NX +unit comprises one or more hardware engines or co-processor types +such as 842 compression, GZIP compression and encryption. On power9, +userspace applications will have access only to the GZIP compression engine, +which supports ZLIB and GZIP compression algorithms in the hardware. + +To communicate with NX, the kernel has to establish a channel or window; +requests can then be submitted directly without kernel involvement. +Requests to the GZIP engine must be formatted as a co-processor Request +Block (CRB) and these CRBs must be submitted to the NX using COPY/PASTE +instructions to paste the CRB to the hardware address associated with +the engine's request queue. + +The GZIP engine provides two priority levels of requests: Normal and +High. Only Normal requests are supported from userspace right now. + +This document explains the userspace API used to interact with the +kernel to set up a channel / window that can be used to send compression +requests directly to the NX accelerator. + + +Overview +======== + +Application access to the GZIP engine is provided through the +/dev/crypto/nx-gzip device node implemented by the VAS/NX device driver. +An application must open the /dev/crypto/nx-gzip device to obtain a file +descriptor (fd). It should then issue the VAS_TX_WIN_OPEN ioctl with this fd to +establish a connection to the engine, which opens a send window on a GZIP +engine for this process. Once a connection is established, the application +should use the mmap() system call to map the hardware address of the engine's +request queue into the application's virtual address space. + +The application can then submit one or more requests to the engine by +using copy/paste instructions and pasting the CRBs to the virtual address +(aka paste_address) returned by mmap(). User space can close the +established connection or send window by closing the file descriptor +(close(fd)) or upon process exit. + +Note that applications can send several requests with the same window or +can establish multiple windows, but only one window per file descriptor. + +The following sections provide additional details and references about the +individual steps. + +NX-GZIP Device Node +=================== + +There is one /dev/crypto/nx-gzip node in the system and it provides +access to all GZIP engines in the system. The only valid operations on +/dev/crypto/nx-gzip are: + + * open() the device for read and write. + * issue the VAS_TX_WIN_OPEN ioctl + * mmap() the engine's request queue into the application's virtual + address space (i.e. get a paste_address for the co-processor + engine). + * close the device node. + +Other file operations on this device node are undefined. + +Note that the copy and paste operations go directly to the hardware and +do not go through this device. Refer to the COPY/PASTE document for more +details. + +Although a system may have several instances of the NX co-processor +engines (typically, one per P9 chip) there is just one +/dev/crypto/nx-gzip device node in the system. When the nx-gzip device +node is opened, the kernel opens a send window on a suitable instance of the NX +accelerator: it finds the CPU on which the user process is executing and +determines the NX instance on the chip to which this CPU +belongs. + +Applications may choose a specific instance of the NX co-processor using +the vas_id field in the VAS_TX_WIN_OPEN ioctl as detailed below.
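For illustration, one way to obtain a vas_id is to read the ibm,vas-id
property (a big-endian 32-bit value) from the device tree; this is an
untested sketch and the exact node path varies by platform:

 ::

	#include <endian.h>
	#include <glob.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Return the first ibm,vas-id found, or -1 on failure. */
	int find_vas_id(void)
	{
		glob_t g;
		uint32_t be_id;
		FILE *f;
		int id = -1;

		if (glob("/proc/device-tree/vas@*/ibm,vas-id", 0, NULL, &g))
			return -1;
		f = fopen(g.gl_pathv[0], "rb");
		if (f && fread(&be_id, sizeof(be_id), 1, f) == 1)
			id = (int)be32toh(be_id);
		if (f)
			fclose(f);
		globfree(&g);
		return id;
	}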
+ +A userspace library, libnxz, is available but still in development: + https://github.com/abalib/power-gzip + +Applications that use inflate / deflate calls can link with libnxz +instead of libz and use NX GZIP compression without any modification. + +Open /dev/crypto/nx-gzip +======================== + +The nx-gzip device should be opened for read and write. No special +privileges are needed to open the device. Each window corresponds to one +file descriptor. So if the userspace process needs multiple windows, +several open calls have to be issued. + +See open(2) system call man pages for other details such as return values, +error codes and restrictions. + +VAS_TX_WIN_OPEN ioctl +===================== + +Applications should use the VAS_TX_WIN_OPEN ioctl as follows to establish +a connection with the NX co-processor engine: + + :: + struct vas_tx_win_open_attr { + __u32 version; + __s16 vas_id; /* specific instance of vas or -1 + for default */ + __u16 reserved1; + __u64 flags; /* For future use */ + __u64 reserved2[6]; + }; + + version: The version field must currently be set to 1. + vas_id: If '-1' is passed, the kernel will make a best-effort attempt + to assign an optimal instance of NX for the process. To + select a specific VAS instance, refer to the + "Discovery of available VAS engines" section below. + + flags, reserved1 and reserved2[6] fields are for future extension + and must be set to 0. + + The attributes attr for the VAS_TX_WIN_OPEN ioctl are defined as + follows: + #define VAS_MAGIC 'v' + #define VAS_TX_WIN_OPEN _IOW(VAS_MAGIC, 1, + struct vas_tx_win_open_attr) + + struct vas_tx_win_open_attr attr; + rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr); + + The VAS_TX_WIN_OPEN ioctl returns 0 on success. On errors, it + returns -1 and sets the errno variable to indicate the error. + + Error conditions: + EINVAL fd does not refer to a valid VAS device. + EINVAL Invalid vas ID + EINVAL version is not set to a valid value + EEXIST Window is already opened for the given fd + ENOMEM Memory is not available to allocate window + ENOSPC System has too many active windows (connections) + opened + EINVAL reserved fields are not set to 0. + + See the ioctl(2) man page for more details, error codes and + restrictions. + +mmap() NX-GZIP device +===================== + +The mmap() system call for an NX-GZIP device fd returns a paste_address +that the application can use to copy/paste its CRB to the hardware engines. + :: + + paste_addr = mmap(addr, size, prot, flags, fd, offset); + + The only restrictions on mmap() for an NX-GZIP device fd are: + * size should be PAGE_SIZE + * offset parameter should be 0ULL. + + Refer to the mmap(2) man page for additional details/restrictions. + In addition to the error conditions listed on the mmap(2) man + page, mmap() can also fail with one of the following error codes: + + EINVAL fd is not associated with an open window + (i.e mmap() does not follow a successful call + to the VAS_TX_WIN_OPEN ioctl). + EINVAL offset field is not 0ULL. + +Discovery of available VAS engines +================================== + +Each available VAS instance in the system will have a device tree node +like /proc/device-tree/vas@* or /proc/device-tree/xscom@*/vas@*. +Determine the chip or VAS instance and use the corresponding ibm,vas-id +property value in this node to select a specific VAS instance. + +Copy/Paste operations +===================== + +Applications should use the copy and paste instructions to send CRBs to NX.
+Refer to section 4.4 in the Power ISA for the Copy/Paste instructions: +https://openpowerfoundation.org/?resource_lib=power-isa-version-3-0 + +CRB specification and NX usage +============================== + +Applications should format requests to the co-processor using +co-processor Request Blocks (CRBs). Refer to the NX-GZIP user's manual for the +format of the CRB and for how to use NX from userspace, such as sending +requests and checking request status. + +NX Fault handling +================= + +Applications send requests to NX and wait for the status by polling on +co-processor Status Block (CSB) flags. NX updates the status in the CSB after +each request is processed. Refer to the NX-GZIP user's manual for the format +of the CSB and status flags. + +If NX encounters a translation error (called an NX page fault) on the CSB +address or any request buffer, it raises an interrupt on the CPU to handle the +fault. A page fault can happen if an application passes invalid addresses or +request buffers are not in memory. The operating system handles the fault by +updating the CSB with the following data: + + csb.flags = CSB_V; + csb.cc = CSB_CC_TRANSLATION; + csb.ce = CSB_CE_TERMINATION; + csb.address = fault_address; + +When an application receives a translation error, it can touch or access +the page at the fault address so that this page is brought into memory. Then +the application can resend the request to NX. + +If the OS cannot update the CSB due to an invalid CSB address, it sends a +SIGSEGV signal to the process that opened the send window on which the +original request was issued. This signal carries the following siginfo struct: + + siginfo.si_signo = SIGSEGV; + siginfo.si_errno = EFAULT; + siginfo.si_code = SEGV_MAPERR; + siginfo.si_addr = CSB address; + +In the case of multi-thread applications, NX send windows can be shared +across all threads. For example, a child thread can open a send window, +but other threads can send requests to NX using this window. These +requests will be successful even in the case of OS handling faults as long +as the CSB address is valid. If the NX request contains an invalid CSB address, +the signal will be sent to the child thread that opened the window. But if +the thread exits without closing the window and a request is issued using +this window, the signal is sent to the thread group leader (tgid). It is up +to the application whether to ignore or handle these signals. + +NX-GZIP User's Manual: +https://github.com/libnxz/power-gzip/blob/master/power_nx_gzip_um.pdf + +Simple example +============== + + :: + int use_nx_gzip() + { + int rc, fd; + void *addr; + struct coprocessor_request_block crb; + struct vas_tx_win_open_attr txattr; + + fd = open("/dev/crypto/nx-gzip", O_RDWR); + if (fd < 0) { + fprintf(stderr, "open nx-gzip failed\n"); + return -1; + } + memset(&txattr, 0, sizeof(txattr)); + txattr.version = 1; + txattr.vas_id = -1; + rc = ioctl(fd, VAS_TX_WIN_OPEN, + (unsigned long)&txattr); + if (rc < 0) { + fprintf(stderr, "ioctl() rc %d, error %d\n", + rc, errno); + return rc; + } + addr = mmap(NULL, 4096, PROT_READ|PROT_WRITE, + MAP_SHARED, fd, 0ULL); + if (addr == MAP_FAILED) { + fprintf(stderr, "mmap() failed, errno %d\n", + errno); + return -errno; + } + do { + //Format CRB request with compression or + //uncompression + // Refer tests for vas_copy/vas_paste + vas_copy(&crb, 0); + vas_paste(addr, 0); + // Poll on csb.flags with timeout + // csb address is listed in CRB + } while (true); + close(fd); /* window is also closed upon process exit */ + } + + Refer to https://github.com/abalib/power-gzip for tests and more + use cases.
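For illustration, an untested sketch of a polling helper that follows the
fault-handling protocol above; field names mirror the CSB layout described
in the user's manual, and touching the faulting page allows a resend:

 ::

	#include <endian.h>
	#include <stdint.h>
	#include <time.h>

	#define CSB_V			0x80
	#define CSB_CC_TRANSLATION	5

	struct csb {
		volatile uint8_t flags;
		uint8_t cs;
		uint8_t cc;
		uint8_t ce;
		uint32_t count;		/* big-endian TPBC */
		uint64_t address;	/* big-endian fault address */
	};

	/* Returns 0 on success, 1 if the request should be resent
	 * after an NX page fault, -1 on timeout or other error.
	 */
	int poll_csb(struct csb *csb, int timeout_s)
	{
		time_t end = time(NULL) + timeout_s;

		while (!(csb->flags & CSB_V))
			if (time(NULL) > end)
				return -1;
		if (csb->cc == CSB_CC_TRANSLATION) {
			volatile char *p = (volatile char *)
				(uintptr_t)be64toh(csb->address);

			(void)*p;	/* fault the page back in */
			csb->flags = 0;	/* reset valid bit for resend */
			return 1;
		}
		return csb->cc ? -1 : 0;
	}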
-- cgit v1.2.3-59-g8ed1b From d53979b589609d87036d8daf9500f7eccb0c6317 Mon Sep 17 00:00:00 2001 From: Raphael Moreira Zinsly Date: Mon, 20 Apr 2020 17:55:34 -0300 Subject: selftests/powerpc: Add header files for GZIP engine test Add files to access the powerpc NX-GZIP engine in user space. Signed-off-by: Bulent Abali Signed-off-by: Raphael Moreira Zinsly Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200420205538.25181-2-rzinsly@linux.ibm.com --- .../selftests/powerpc/nx-gzip/include/crb.h | 155 +++++++++++++++++++++ .../testing/selftests/powerpc/nx-gzip/include/nx.h | 38 +++++ .../selftests/powerpc/nx-gzip/include/vas-api.h | 1 + 3 files changed, 194 insertions(+) create mode 100644 tools/testing/selftests/powerpc/nx-gzip/include/crb.h create mode 100644 tools/testing/selftests/powerpc/nx-gzip/include/nx.h create mode 120000 tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/crb.h b/tools/testing/selftests/powerpc/nx-gzip/include/crb.h new file mode 100644 index 000000000000..ab101085fa7e --- /dev/null +++ b/tools/testing/selftests/powerpc/nx-gzip/include/crb.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef __CRB_H +#define __CRB_H +#include +#include "nx.h" + +/* CCW 842 CI/FC masks + * NX P8 workbook, section 4.3.1, figure 4-6 + * "CI/FC Boundary by NX CT type" + */ +#define CCW_CI_842 (0x00003ff8) +#define CCW_FC_842 (0x00000007) + +/* Chapter 6.5.8 Coprocessor-Completion Block (CCB) */ + +#define CCB_VALUE (0x3fffffffffffffff) +#define CCB_ADDRESS (0xfffffffffffffff8) +#define CCB_CM (0x0000000000000007) +#define CCB_CM0 (0x0000000000000004) +#define CCB_CM12 (0x0000000000000003) + +#define CCB_CM0_ALL_COMPLETIONS (0x0) +#define CCB_CM0_LAST_IN_CHAIN (0x4) +#define CCB_CM12_STORE (0x0) +#define CCB_CM12_INTERRUPT (0x1) + +#define CCB_SIZE (0x10) +#define CCB_ALIGN CCB_SIZE + +struct coprocessor_completion_block { + __be64 value; + __be64 address; +} __aligned(CCB_ALIGN); + + +/* Chapter 6.5.7 Coprocessor-Status Block (CSB) */ + +#define CSB_V (0x80) +#define CSB_F (0x04) +#define CSB_CH (0x03) +#define CSB_CE_INCOMPLETE (0x80) +#define CSB_CE_TERMINATION (0x40) +#define CSB_CE_TPBC (0x20) + +#define CSB_CC_SUCCESS (0) +#define CSB_CC_INVALID_ALIGN (1) +#define CSB_CC_OPERAND_OVERLAP (2) +#define CSB_CC_DATA_LENGTH (3) +#define CSB_CC_TRANSLATION (5) +#define CSB_CC_PROTECTION (6) +#define CSB_CC_RD_EXTERNAL (7) +#define CSB_CC_INVALID_OPERAND (8) +#define CSB_CC_PRIVILEGE (9) +#define CSB_CC_INTERNAL (10) +#define CSB_CC_WR_EXTERNAL (12) +#define CSB_CC_NOSPC (13) +#define CSB_CC_EXCESSIVE_DDE (14) +#define CSB_CC_WR_TRANSLATION (15) +#define CSB_CC_WR_PROTECTION (16) +#define CSB_CC_UNKNOWN_CODE (17) +#define CSB_CC_ABORT (18) +#define CSB_CC_TRANSPORT (20) +#define CSB_CC_SEGMENTED_DDL (31) +#define CSB_CC_PROGRESS_POINT (32) +#define CSB_CC_DDE_OVERFLOW (33) +#define CSB_CC_SESSION (34) +#define CSB_CC_PROVISION (36) +#define CSB_CC_CHAIN (37) +#define CSB_CC_SEQUENCE (38) +#define CSB_CC_HW (39) + +#define CSB_SIZE (0x10) +#define CSB_ALIGN CSB_SIZE + +struct coprocessor_status_block { + __u8 flags; + __u8 cs; + __u8 cc; + __u8 ce; + __be32 count; + __be64 address; +} __aligned(CSB_ALIGN); + + +/* Chapter 6.5.10 Data-Descriptor List (DDL) + * each list contains one or more Data-Descriptor Entries (DDE) + */ + +#define DDE_P (0x8000) + +#define DDE_SIZE (0x10) +#define DDE_ALIGN DDE_SIZE + +struct data_descriptor_entry { + __be16 flags; + __u8 count; + __u8 index; + 
__be32 length; + __be64 address; +} __aligned(DDE_ALIGN); + + +/* Chapter 6.5.2 Coprocessor-Request Block (CRB) */ + +#define CRB_SIZE (0x80) +#define CRB_ALIGN (0x100) /* Errata: requires 256 alignment */ + + +/* Coprocessor Status Block field + * ADDRESS address of CSB + * C CCB is valid + * AT 0 = addrs are virtual, 1 = addrs are phys + * M enable perf monitor + */ +#define CRB_CSB_ADDRESS (0xfffffffffffffff0) +#define CRB_CSB_C (0x0000000000000008) +#define CRB_CSB_AT (0x0000000000000002) +#define CRB_CSB_M (0x0000000000000001) + +struct coprocessor_request_block { + __be32 ccw; + __be32 flags; + __be64 csb_addr; + + struct data_descriptor_entry source; + struct data_descriptor_entry target; + + struct coprocessor_completion_block ccb; + + __u8 reserved[48]; + + struct coprocessor_status_block csb; +} __aligned(CRB_ALIGN); + +#define crb_csb_addr(c) __be64_to_cpu(c->csb_addr) +#define crb_nx_fault_addr(c) __be64_to_cpu(c->stamp.nx.fault_storage_addr) +#define crb_nx_flags(c) c->stamp.nx.flags +#define crb_nx_fault_status(c) c->stamp.nx.fault_status +#define crb_nx_pswid(c) c->stamp.nx.pswid + + +/* RFC02167 Initiate Coprocessor Instructions document + * Chapter 8.2.1.1.1 RS + * Chapter 8.2.3 Coprocessor Directive + * Chapter 8.2.4 Execution + * + * The CCW must be converted to BE before passing to icswx() + */ + +#define CCW_PS (0xff000000) +#define CCW_CT (0x00ff0000) +#define CCW_CD (0x0000ffff) +#define CCW_CL (0x0000c000) + +#endif diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/nx.h b/tools/testing/selftests/powerpc/nx-gzip/include/nx.h new file mode 100644 index 000000000000..1abe23fc29e8 --- /dev/null +++ b/tools/testing/selftests/powerpc/nx-gzip/include/nx.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2020 IBM Corp. + * + */ +#ifndef _NX_H +#define _NX_H + +#include + +#define NX_FUNC_COMP_842 1 +#define NX_FUNC_COMP_GZIP 2 + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +struct nx842_func_args { + bool use_crc; + bool decompress; /* true decompress; false compress */ + bool move_data; + int timeout; /* seconds */ +}; + +struct nxbuf_t { + int len; + char *buf; +}; + +/* @function should be EFT (aka 842), GZIP etc */ +void *nx_function_begin(int function, int pri); + +int nx_function(void *handle, struct nxbuf_t *in, struct nxbuf_t *out, + void *arg); + +int nx_function_end(void *handle); + +#endif /* _NX_H */ diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h b/tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h new file mode 120000 index 000000000000..77fb4c7236d0 --- /dev/null +++ b/tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h @@ -0,0 +1 @@ +../../../../../../arch/powerpc/include/uapi/asm/vas-api.h \ No newline at end of file -- cgit v1.2.3-59-g8ed1b From f49b75724cfa2a6264aa5a77f33c3883701852af Mon Sep 17 00:00:00 2001 From: Raphael Moreira Zinsly Date: Mon, 20 Apr 2020 17:55:35 -0300 Subject: selftests/powerpc: Add header files for NX compression/decompression Add files to be able to compress and decompress files using the powerpc NX-GZIP engine.
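For orientation, the API declared in include/nx.h is used roughly as follows (a hypothetical sketch; see the selftest sources for the real calling conventions and error handling):

	#include <stdbool.h>
	#include "nx.h"

	/* Compress inbuf into outbuf using the GZIP engine. */
	int gzip_compress(char *inbuf, int inlen, char *outbuf, int outlen)
	{
		struct nxbuf_t in = { .len = inlen, .buf = inbuf };
		struct nxbuf_t out = { .len = outlen, .buf = outbuf };
		struct nx842_func_args args = {
			.use_crc = true,
			.decompress = false,
			.timeout = 60,	/* seconds */
		};
		void *handle = nx_function_begin(NX_FUNC_COMP_GZIP, 0);
		int rc;

		if (!handle)
			return -1;
		rc = nx_function(handle, &in, &out, &args);
		nx_function_end(handle);
		return rc;
	}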
Signed-off-by: Bulent Abali
Signed-off-by: Raphael Moreira Zinsly
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200420205538.25181-3-rzinsly@linux.ibm.com
---
 .../selftests/powerpc/nx-gzip/include/copy-paste.h |  56 ++
 .../selftests/powerpc/nx-gzip/include/nx_dbg.h     |  95 +++
 .../selftests/powerpc/nx-gzip/include/nxu.h        | 650 +++++++++++++++++++++
 3 files changed, 801 insertions(+)
 create mode 100644 tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h
 create mode 100644 tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h
 create mode 100644 tools/testing/selftests/powerpc/nx-gzip/include/nxu.h

diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h b/tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h
new file mode 100644
index 000000000000..0db2d6485037
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/* From asm-compat.h */
+#define __stringify_in_c(...)	#__VA_ARGS__
+#define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "
+
+/*
+ * Macros taken from arch/powerpc/include/asm/ppc-opcode.h and other
+ * header files.
+ */
+#define ___PPC_RA(a)	(((a) & 0x1f) << 16)
+#define ___PPC_RB(b)	(((b) & 0x1f) << 11)
+
+#define PPC_INST_COPY	0x7c20060c
+#define PPC_INST_PASTE	0x7c20070d
+
+#define PPC_COPY(a, b)		stringify_in_c(.long PPC_INST_COPY | \
+					___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_PASTE(a, b)		stringify_in_c(.long PPC_INST_PASTE | \
+					___PPC_RA(a) | ___PPC_RB(b))
+#define CR0_SHIFT	28
+#define CR0_MASK	0xF
+/*
+ * Copy/paste instructions:
+ *
+ *	copy RA,RB
+ *		Copy contents of address (RA) + effective_address(RB)
+ *		to internal copy-buffer.
+ *
+ *	paste RA,RB
+ *		Paste contents of internal copy-buffer to the address
+ *		(RA) + effective_address(RB)
+ */
+static inline int vas_copy(void *crb, int offset)
+{
+	asm volatile(PPC_COPY(%0, %1)";"
+		:
+		: "b" (offset), "b" (crb)
+		: "memory");
+
+	return 0;
+}
+
+static inline int vas_paste(void *paste_address, int offset)
+{
+	__u32 cr;
+
+	cr = 0;
+	asm volatile(PPC_PASTE(%1, %2)";"
+		"mfocrf %0, 0x80;"
+		: "=r" (cr)
+		: "b" (offset), "b" (paste_address)
+		: "memory", "cr0");
+
+	return (cr >> CR0_SHIFT) & CR0_MASK;
+}
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h b/tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h
new file mode 100644
index 000000000000..16464e19c47f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2020 IBM Corporation
+ *
+ */
+
+#ifndef _NXU_DBG_H_
+#define _NXU_DBG_H_
+
+#include <sys/file.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <time.h>
+#include <pthread.h>
+
+extern FILE * nx_gzip_log;
+extern int nx_gzip_trace;
+extern unsigned int nx_gzip_inflate_impl;
+extern unsigned int nx_gzip_deflate_impl;
+extern unsigned int nx_gzip_inflate_flags;
+extern unsigned int nx_gzip_deflate_flags;
+
+extern int nx_dbg;
+pthread_mutex_t mutex_log;
+
+#define nx_gzip_trace_enabled()       (nx_gzip_trace & 0x1)
+#define nx_gzip_hw_trace_enabled()    (nx_gzip_trace & 0x2)
+#define nx_gzip_sw_trace_enabled()    (nx_gzip_trace & 0x4)
+#define nx_gzip_gather_statistics()   (nx_gzip_trace & 0x8)
+#define nx_gzip_per_stream_stat()     (nx_gzip_trace & 0x10)
+
+#define prt(fmt, ...) do { \
+	pthread_mutex_lock(&mutex_log);					\
+	flock(nx_gzip_log->_fileno, LOCK_EX);				\
+	time_t t; struct tm *m; time(&t); m = localtime(&t);		\
+	fprintf(nx_gzip_log, "[%04d/%02d/%02d %02d:%02d:%02d] "		\
+		"pid %d: " fmt,						\
+		(int)m->tm_year + 1900, (int)m->tm_mon+1, (int)m->tm_mday, \
+		(int)m->tm_hour, (int)m->tm_min, (int)m->tm_sec,	\
+		(int)getpid(), ## __VA_ARGS__);				\
+	fflush(nx_gzip_log);						\
+	flock(nx_gzip_log->_fileno, LOCK_UN);				\
+	pthread_mutex_unlock(&mutex_log);				\
+} while (0)

+/* Use in case of an error */
+#define prt_err(fmt, ...) do { if (nx_dbg >= 0) {			\
+	prt("%s:%u: Error: "fmt,					\
+		__FILE__, __LINE__, ## __VA_ARGS__);			\
+}} while (0)
+
+/* Use in case of a warning */
+#define prt_warn(fmt, ...) do { if (nx_dbg >= 1) {			\
+	prt("%s:%u: Warning: "fmt,					\
+		__FILE__, __LINE__, ## __VA_ARGS__);			\
+}} while (0)
+
+/* Informational printouts */
+#define prt_info(fmt, ...) do { if (nx_dbg >= 2) {			\
+	prt("Info: "fmt, ## __VA_ARGS__);				\
+}} while (0)
+
+/* Trace zlib wrapper code */
+#define prt_trace(fmt, ...) do { if (nx_gzip_trace_enabled()) {		\
+	prt("### "fmt, ## __VA_ARGS__);					\
+}} while (0)
+
+/* Trace statistics */
+#define prt_stat(fmt, ...) do { if (nx_gzip_gather_statistics()) {	\
+	prt("### "fmt, ## __VA_ARGS__);					\
+}} while (0)
+
+/* Trace zlib hardware implementation */
+#define hw_trace(fmt, ...) do {						\
+		if (nx_gzip_hw_trace_enabled())				\
+			fprintf(nx_gzip_log, "hhh " fmt, ## __VA_ARGS__); \
+	} while (0)
+
+/* Trace zlib software implementation */
+#define sw_trace(fmt, ...) do {						\
+		if (nx_gzip_sw_trace_enabled())				\
+			fprintf(nx_gzip_log, "sss " fmt, ## __VA_ARGS__); \
+	} while (0)
+
+
+/**
+ * str_to_num - Convert string into number and copy with endings like
+ *              KiB for kilobyte
+ *              MiB for megabyte
+ *              GiB for gigabyte
+ */
+uint64_t str_to_num(char *str);
+void nx_lib_debug(int onoff);
+
+#endif	/* _NXU_DBG_H_ */
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/nxu.h b/tools/testing/selftests/powerpc/nx-gzip/include/nxu.h
new file mode 100644
index 000000000000..20a4e883e0d3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/nxu.h
@@ -0,0 +1,650 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Hardware interface of the NX-GZIP compression accelerator
+ *
+ * Copyright (C) IBM Corporation, 2020
+ *
+ * Author: Bulent Abali
+ *
+ */
+
+#ifndef _NXU_H
+#define _NXU_H
+
+#include <stdint.h>
+#include <endian.h>
+#include "nx.h"
+
+/* deflate */
+#define LLSZ   286
+#define DSZ    30
+
+/* nx */
+#define DHTSZ  18
+#define DHT_MAXSZ 288
+#define MAX_DDE_COUNT 256
+
+/* util */
+#ifdef NXDBG
+#define NXPRT(X) X
+#else
+#define NXPRT(X)
+#endif
+
+#ifdef NXTIMER
+#include <sys/platform/ppc.h>
+#define NX_CLK(X) X
+#define nx_get_time()  __ppc_get_timebase()
+#define nx_get_freq()  __ppc_get_timebase_freq()
+#else
+#define NX_CLK(X)
+#define nx_get_time()  (-1)
+#define nx_get_freq()  (-1)
+#endif
+
+#define NX_MAX_FAULTS 500
+
+/*
+ * Definitions of acronyms used here.
See + * P9 NX Gzip Accelerator User's Manual for details: + * https://github.com/libnxz/power-gzip/blob/develop/doc/power_nx_gzip_um.pdf + * + * adler/crc: 32 bit checksums appended to stream tail + * ce: completion extension + * cpb: coprocessor parameter block (metadata) + * crb: coprocessor request block (command) + * csb: coprocessor status block (status) + * dht: dynamic huffman table + * dde: data descriptor element (address, length) + * ddl: list of ddes + * dh/fh: dynamic and fixed huffman types + * fc: coprocessor function code + * histlen: history/dictionary length + * history: sliding window of up to 32KB of data + * lzcount: Deflate LZ symbol counts + * rembytecnt: remaining byte count + * sfbt: source final block type; last block's type during decomp + * spbc: source processed byte count + * subc: source unprocessed bit count + * tebc: target ending bit count; valid bits in the last byte + * tpbc: target processed byte count + * vas: virtual accelerator switch; the user mode interface + */ + +union nx_qw_t { + uint32_t word[4]; + uint64_t dword[2]; +} __aligned(16); + +/* + * Note: NX registers with fewer than 32 bits are declared by + * convention as uint32_t variables in unions. If *_offset and *_mask + * are defined for a variable, then use get_ put_ macros to + * conveniently access the register fields for endian conversions. + */ + +struct nx_dde_t { + /* Data Descriptor Element, Section 6.4 */ + union { + uint32_t dde_count; + /* When dde_count == 0 ddead is a pointer to a data buffer; + * ddebc is the buffer length bytes. + * When dde_count > 0 dde is an indirect dde; ddead is a + * pointer to a contiguous list of direct ddes; ddebc is the + * total length of all data pointed to by the list of direct + * ddes. Note that only one level of indirection is permitted. + * See Section 6.4 of the user manual for additional details. + */ + }; + uint32_t ddebc; /* dde byte count */ + uint64_t ddead; /* dde address */ +} __aligned(16); + +struct nx_csb_t { + /* Coprocessor Status Block, Section 6.6 */ + union { + uint32_t csb_v; + /* Valid bit. v must be set to 0 by the program + * before submitting the coprocessor command. + * Software can poll for the v bit + */ + + uint32_t csb_f; + /* 16B CSB size. Written to 0 by DMA when it writes the CPB */ + + uint32_t csb_cs; + /* cs completion sequence; unused */ + + uint32_t csb_cc; + /* cc completion code; cc != 0 exception occurred */ + + uint32_t csb_ce; + /* ce completion extension */ + + }; + uint32_t tpbc; + /* target processed byte count TPBC */ + + uint64_t fsaddr; + /* Section 6.12.1 CSB NonZero error summary. FSA Failing storage + * address. Address where error occurred. 
When available, written + * to A field of CSB + */ +} __aligned(16); + +struct nx_ccb_t { + /* Coprocessor Completion Block, Section 6.7 */ + + uint32_t reserved[3]; + union { + /* When crb.c==0 (no ccb defined) it is reserved; + * When crb.c==1 (ccb defined) it is cm + */ + + uint32_t ccb_cm; + /* Signal interrupt of crb.c==1 and cm==1 */ + + uint32_t word; + /* generic access to the 32bit word */ + }; +} __aligned(16); + +struct vas_stamped_crb_t { + /* + * CRB operand of the paste coprocessor instruction is stamped + * in quadword 4 with the information shown here as its written + * in to the receive FIFO of the coprocessor + */ + + union { + uint32_t vas_buf_num; + /* Verification only vas buffer number which correlates to + * the low order bits of the atag in the paste command + */ + + uint32_t send_wc_id; + /* Pointer to Send Window Context that provides for NX address + * translation information, such as MSR and LPCR bits, job + * completion interrupt RA, PSWID, and job utilization counter. + */ + + }; + union { + uint32_t recv_wc_id; + /* Pointer to Receive Window Context. NX uses this to return + * credits to a Receive FIFO as entries are dequeued. + */ + + }; + uint32_t reserved2; + union { + uint32_t vas_invalid; + /* Invalid bit. If this bit is 1 the CRB is discarded by + * NX upon fetching from the receive FIFO. If this bit is 0 + * the CRB is processed normally. The bit is stamped to 0 + * by VAS and may be written to 1 by hypervisor while + * the CRB is in the receive FIFO (in memory). + */ + + }; +}; + +struct nx_stamped_fault_crb_t { + /* + * A CRB that has a translation fault is stamped by NX in quadword 4 + * and pasted to the Fault Send Window in VAS. + */ + uint64_t fsa; + union { + uint32_t nxsf_t; + uint32_t nxsf_fs; + }; + uint32_t pswid; +}; + +union stamped_crb_t { + struct vas_stamped_crb_t vas; + struct nx_stamped_fault_crb_t nx; +}; + +struct nx_gzip_cpb_t { + /* + * Coprocessor Parameter Block In/Out are used to pass metadata + * to/from accelerator. Tables 6.5 and 6.6 of the user manual. 
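+ *
+ * A minimal access sketch (cmdp below is a hypothetical pointer to a
+ * struct nx_gzip_crb_cpb_t; the get/put accessors are defined later in
+ * this header):
+ *
+ *	put32(cmdp->cpb, in_crc, 0);		// seed the gzip crc32
+ *	crc = get32(cmdp->cpb, out_crc);	// carry into the next chunk
+ *	put32(cmdp->cpb, in_crc, crc);		// resume checksum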
+ */ + + /* CPBInput */ + + struct { + union { + union nx_qw_t qw0; + struct { + uint32_t in_adler; /* bits 0:31 */ + uint32_t in_crc; /* bits 32:63 */ + union { + uint32_t in_histlen; /* bits 64:75 */ + uint32_t in_subc; /* bits 93:95 */ + }; + union { + /* bits 108:111 */ + uint32_t in_sfbt; + /* bits 112:127 */ + uint32_t in_rembytecnt; + /* bits 116:127 */ + uint32_t in_dhtlen; + }; + }; + }; + union { + union nx_qw_t in_dht[DHTSZ]; /* qw[1:18] */ + char in_dht_char[DHT_MAXSZ]; /* byte access */ + }; + union nx_qw_t reserved[5]; /* qw[19:23] */ + }; + + /* CPBOutput */ + + volatile struct { + union { + union nx_qw_t qw24; + struct { + uint32_t out_adler; /* bits 0:31 qw[24] */ + uint32_t out_crc; /* bits 32:63 qw[24] */ + union { + /* bits 77:79 qw[24] */ + uint32_t out_tebc; + /* bits 80:95 qw[24] */ + uint32_t out_subc; + }; + union { + /* bits 108:111 qw[24] */ + uint32_t out_sfbt; + /* bits 112:127 qw[24] */ + uint32_t out_rembytecnt; + /* bits 116:127 qw[24] */ + uint32_t out_dhtlen; + }; + }; + }; + union { + union nx_qw_t qw25[79]; /* qw[25:103] */ + /* qw[25] compress no lzcounts or wrap */ + uint32_t out_spbc_comp_wrap; + uint32_t out_spbc_wrap; /* qw[25] wrap */ + /* qw[25] compress no lzcounts */ + uint32_t out_spbc_comp; + /* 286 LL and 30 D symbol counts */ + uint32_t out_lzcount[LLSZ+DSZ]; + struct { + union nx_qw_t out_dht[DHTSZ]; /* qw[25:42] */ + /* qw[43] decompress */ + uint32_t out_spbc_decomp; + }; + }; + /* qw[104] compress with lzcounts */ + uint32_t out_spbc_comp_with_count; + }; +} __aligned(128); + +struct nx_gzip_crb_t { + union { /* byte[0:3] */ + uint32_t gzip_fc; /* bits[24-31] */ + }; + uint32_t reserved1; /* byte[4:7] */ + union { + uint64_t csb_address; /* byte[8:15] */ + struct { + uint32_t reserved2; + union { + uint32_t crb_c; + /* c==0 no ccb defined */ + + uint32_t crb_at; + /* at==0 address type is ignored; + * all addrs effective assumed. + */ + + }; + }; + }; + struct nx_dde_t source_dde; /* byte[16:31] */ + struct nx_dde_t target_dde; /* byte[32:47] */ + volatile struct nx_ccb_t ccb; /* byte[48:63] */ + volatile union { + /* byte[64:239] shift csb by 128 bytes out of the crb; csb was + * in crb earlier; JReilly says csb written with partial inject + */ + union nx_qw_t reserved64[11]; + union stamped_crb_t stamp; /* byte[64:79] */ + }; + volatile struct nx_csb_t csb; +} __aligned(128); + +struct nx_gzip_crb_cpb_t { + struct nx_gzip_crb_t crb; + struct nx_gzip_cpb_t cpb; +} __aligned(2048); + + +/* + * NX hardware convention has the msb bit on the left numbered 0. + * The defines below has *_offset defined as the right most bit + * position of a field. x of size_mask(x) is the field width in bits. + */ + +#define size_mask(x) ((1U<<(x))-1) + +/* + * Offsets and Widths within the containing 32 bits of the various NX + * gzip hardware registers. 
Use the getnn/putnn macros to access + * these regs + */ + +#define dde_count_mask size_mask(8) +#define dde_count_offset 23 + +/* CSB */ + +#define csb_v_mask size_mask(1) +#define csb_v_offset 0 +#define csb_f_mask size_mask(1) +#define csb_f_offset 6 +#define csb_cs_mask size_mask(8) +#define csb_cs_offset 15 +#define csb_cc_mask size_mask(8) +#define csb_cc_offset 23 +#define csb_ce_mask size_mask(8) +#define csb_ce_offset 31 + +/* CCB */ + +#define ccb_cm_mask size_mask(3) +#define ccb_cm_offset 31 + +/* VAS stamped CRB fields */ + +#define vas_buf_num_mask size_mask(6) +#define vas_buf_num_offset 5 +#define send_wc_id_mask size_mask(16) +#define send_wc_id_offset 31 +#define recv_wc_id_mask size_mask(16) +#define recv_wc_id_offset 31 +#define vas_invalid_mask size_mask(1) +#define vas_invalid_offset 31 + +/* NX stamped fault CRB fields */ + +#define nxsf_t_mask size_mask(1) +#define nxsf_t_offset 23 +#define nxsf_fs_mask size_mask(8) +#define nxsf_fs_offset 31 + +/* CPB input */ + +#define in_histlen_mask size_mask(12) +#define in_histlen_offset 11 +#define in_dhtlen_mask size_mask(12) +#define in_dhtlen_offset 31 +#define in_subc_mask size_mask(3) +#define in_subc_offset 31 +#define in_sfbt_mask size_mask(4) +#define in_sfbt_offset 15 +#define in_rembytecnt_mask size_mask(16) +#define in_rembytecnt_offset 31 + +/* CPB output */ + +#define out_tebc_mask size_mask(3) +#define out_tebc_offset 15 +#define out_subc_mask size_mask(16) +#define out_subc_offset 31 +#define out_sfbt_mask size_mask(4) +#define out_sfbt_offset 15 +#define out_rembytecnt_mask size_mask(16) +#define out_rembytecnt_offset 31 +#define out_dhtlen_mask size_mask(12) +#define out_dhtlen_offset 31 + +/* CRB */ + +#define gzip_fc_mask size_mask(8) +#define gzip_fc_offset 31 +#define crb_c_mask size_mask(1) +#define crb_c_offset 28 +#define crb_at_mask size_mask(1) +#define crb_at_offset 30 +#define csb_address_mask ~(15UL) /* mask off bottom 4b */ + +/* + * Access macros for the registers. Do not access registers directly + * because of the endian conversion. P9 processor may run either as + * Little or Big endian. However the NX coprocessor regs are always + * big endian. + * Use the 32 and 64b macros to access respective + * register sizes. + * Use nn forms for the register fields shorter than 32 bits. 
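+ *
+ * For example (a sketch; cmdp is a hypothetical pointer to a
+ * struct nx_gzip_crb_cpb_t):
+ *
+ *	putnn(cmdp->crb, gzip_fc, GZIP_FC_DECOMPRESS);	// sub-word field
+ *	cc = getnn(cmdp->crb.csb, csb_cc);		// 8-bit completion code
+ *
+ * works unchanged on either host endianness.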
+ */ + +#define getnn(ST, REG) ((be32toh(ST.REG) >> (31-REG##_offset)) \ + & REG##_mask) +#define getpnn(ST, REG) ((be32toh((ST)->REG) >> (31-REG##_offset)) \ + & REG##_mask) +#define get32(ST, REG) (be32toh(ST.REG)) +#define getp32(ST, REG) (be32toh((ST)->REG)) +#define get64(ST, REG) (be64toh(ST.REG)) +#define getp64(ST, REG) (be64toh((ST)->REG)) + +#define unget32(ST, REG) (get32(ST, REG) & ~((REG##_mask) \ + << (31-REG##_offset))) +/* get 32bits less the REG field */ + +#define ungetp32(ST, REG) (getp32(ST, REG) & ~((REG##_mask) \ + << (31-REG##_offset))) +/* get 32bits less the REG field */ + +#define clear_regs(ST) memset((void *)(&(ST)), 0, sizeof(ST)) +#define clear_dde(ST) do { ST.dde_count = ST.ddebc = 0; ST.ddead = 0; \ + } while (0) +#define clearp_dde(ST) do { (ST)->dde_count = (ST)->ddebc = 0; \ + (ST)->ddead = 0; \ + } while (0) +#define clear_struct(ST) memset((void *)(&(ST)), 0, sizeof(ST)) +#define putnn(ST, REG, X) (ST.REG = htobe32(unget32(ST, REG) | (((X) \ + & REG##_mask) << (31-REG##_offset)))) +#define putpnn(ST, REG, X) ((ST)->REG = htobe32(ungetp32(ST, REG) \ + | (((X) & REG##_mask) << (31-REG##_offset)))) + +#define put32(ST, REG, X) (ST.REG = htobe32(X)) +#define putp32(ST, REG, X) ((ST)->REG = htobe32(X)) +#define put64(ST, REG, X) (ST.REG = htobe64(X)) +#define putp64(ST, REG, X) ((ST)->REG = htobe64(X)) + +/* + * Completion extension ce(0) ce(1) ce(2). Bits ce(3-7) + * unused. Section 6.6 Figure 6.7. + */ + +#define get_csb_ce(ST) ((uint32_t)getnn(ST, csb_ce)) +#define get_csb_ce_ms3b(ST) (get_csb_ce(ST) >> 5) +#define put_csb_ce_ms3b(ST, X) putnn(ST, csb_ce, ((uint32_t)(X) << 5)) + +#define CSB_CE_PARTIAL 0x4 +#define CSB_CE_TERMINATE 0x2 +#define CSB_CE_TPBC_VALID 0x1 + +#define csb_ce_termination(X) (!!((X) & CSB_CE_TERMINATE)) +/* termination, output buffers may be modified, SPBC/TPBC invalid Fig.6-7 */ + +#define csb_ce_check_completion(X) (!csb_ce_termination(X)) +/* if not terminated then check full or partial completion */ + +#define csb_ce_partial_completion(X) (!!((X) & CSB_CE_PARTIAL)) +#define csb_ce_full_completion(X) (!csb_ce_partial_completion(X)) +#define csb_ce_tpbc_valid(X) (!!((X) & CSB_CE_TPBC_VALID)) +/* TPBC indicates successfully stored data count */ + +#define csb_ce_default_err(X) csb_ce_termination(X) +/* most error CEs have CE(0)=0 and CE(1)=1 */ + +#define csb_ce_cc3_partial(X) csb_ce_partial_completion(X) +/* some CC=3 are partially completed, Table 6-8 */ + +#define csb_ce_cc64(X) ((X)&(CSB_CE_PARTIAL \ + | CSB_CE_TERMINATE) == 0) +/* Compression: when TPBC>SPBC then CC=64 Table 6-8; target didn't + * compress smaller than source. + */ + +/* Decompress SFBT combinations Tables 5-3, 6-4, 6-6 */ + +#define SFBT_BFINAL 0x1 +#define SFBT_LIT 0x4 +#define SFBT_FHT 0x5 +#define SFBT_DHT 0x6 +#define SFBT_HDR 0x7 + +/* + * NX gzip function codes. Table 6.2. + * Bits 0:4 are the FC. Bit 5 is used by the DMA controller to + * select one of the two Byte Count Limits. 
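+ *
+ * A worked decode of one value, using the predicates defined below:
+ * GZIP_FC_COMPRESS_RESUME_FHT_COUNT (0x0c) has the decompress bit 0x10
+ * clear and the count bit 0x04 set, so fc_is_compress(0x0c) and
+ * fc_has_count(0x0c) are both true.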
+ */ + +#define GZIP_FC_LIMIT_MASK 0x01 +#define GZIP_FC_COMPRESS_FHT 0x00 +#define GZIP_FC_COMPRESS_DHT 0x02 +#define GZIP_FC_COMPRESS_FHT_COUNT 0x04 +#define GZIP_FC_COMPRESS_DHT_COUNT 0x06 +#define GZIP_FC_COMPRESS_RESUME_FHT 0x08 +#define GZIP_FC_COMPRESS_RESUME_DHT 0x0a +#define GZIP_FC_COMPRESS_RESUME_FHT_COUNT 0x0c +#define GZIP_FC_COMPRESS_RESUME_DHT_COUNT 0x0e +#define GZIP_FC_DECOMPRESS 0x10 +#define GZIP_FC_DECOMPRESS_SINGLE_BLK_N_SUSPEND 0x12 +#define GZIP_FC_DECOMPRESS_RESUME 0x14 +#define GZIP_FC_DECOMPRESS_RESUME_SINGLE_BLK_N_SUSPEND 0x16 +#define GZIP_FC_WRAP 0x1e + +#define fc_is_compress(fc) (((fc) & 0x10) == 0) +#define fc_has_count(fc) (fc_is_compress(fc) && (((fc) & 0x4) != 0)) + +/* CSB.CC Error codes */ + +#define ERR_NX_OK 0 +#define ERR_NX_ALIGNMENT 1 +#define ERR_NX_OPOVERLAP 2 +#define ERR_NX_DATA_LENGTH 3 +#define ERR_NX_TRANSLATION 5 +#define ERR_NX_PROTECTION 6 +#define ERR_NX_EXTERNAL_UE7 7 +#define ERR_NX_INVALID_OP 8 +#define ERR_NX_PRIVILEGE 9 +#define ERR_NX_INTERNAL_UE 10 +#define ERR_NX_EXTERN_UE_WR 12 +#define ERR_NX_TARGET_SPACE 13 +#define ERR_NX_EXCESSIVE_DDE 14 +#define ERR_NX_TRANSL_WR 15 +#define ERR_NX_PROTECT_WR 16 +#define ERR_NX_SUBFUNCTION 17 +#define ERR_NX_FUNC_ABORT 18 +#define ERR_NX_BYTE_MAX 19 +#define ERR_NX_CORRUPT_CRB 20 +#define ERR_NX_INVALID_CRB 21 +#define ERR_NX_INVALID_DDE 30 +#define ERR_NX_SEGMENTED_DDL 31 +#define ERR_NX_DDE_OVERFLOW 33 +#define ERR_NX_TPBC_GT_SPBC 64 +#define ERR_NX_MISSING_CODE 66 +#define ERR_NX_INVALID_DIST 67 +#define ERR_NX_INVALID_DHT 68 +#define ERR_NX_EXTERNAL_UE90 90 +#define ERR_NX_WDOG_TIMER 224 +#define ERR_NX_AT_FAULT 250 +#define ERR_NX_INTR_SERVER 252 +#define ERR_NX_UE253 253 +#define ERR_NX_NO_HW 254 +#define ERR_NX_HUNG_OP 255 +#define ERR_NX_END 256 + +/* initial values for non-resume operations */ +#define INIT_CRC 0 /* crc32(0L, Z_NULL, 0) */ +#define INIT_ADLER 1 /* adler32(0L, Z_NULL, 0) adler is initialized to 1 */ + +/* prototypes */ +int nxu_submit_job(struct nx_gzip_crb_cpb_t *c, void *handle); + +extern void nxu_sigsegv_handler(int sig, siginfo_t *info, void *ctx); +extern int nxu_touch_pages(void *buf, long buf_len, long page_len, int wr); + +/* caller supplies a print buffer 4*sizeof(crb) */ + +char *nx_crb_str(struct nx_gzip_crb_t *crb, char *prbuf); +char *nx_cpb_str(struct nx_gzip_cpb_t *cpb, char *prbuf); +char *nx_prt_hex(void *cp, int sz, char *prbuf); +char *nx_lzcount_str(struct nx_gzip_cpb_t *cpb, char *prbuf); +char *nx_strerror(int e); + +#ifdef NX_SIM +#include +int nx_sim_init(void *ctx); +int nx_sim_end(void *ctx); +int nxu_run_sim_job(struct nx_gzip_crb_cpb_t *c, void *ctx); +#endif /* NX_SIM */ + +/* Deflate stream manipulation */ + +#define set_final_bit(x) (x |= (unsigned char)1) +#define clr_final_bit(x) (x &= ~(unsigned char)1) + +#define append_empty_fh_blk(p, b) do { *(p) = (2 | (1&(b))); *((p)+1) = 0; \ + } while (0) +/* append 10 bits 0000001b 00...... ; + * assumes appending starts on a byte boundary; b is the final bit. + */ + + +#ifdef NX_842 + +/* 842 Engine */ + +struct nx_eft_crb_t { + union { /* byte[0:3] */ + uint32_t eft_fc; /* bits[29-31] */ + }; + uint32_t reserved1; /* byte[4:7] */ + union { + uint64_t csb_address; /* byte[8:15] */ + struct { + uint32_t reserved2; + union { + uint32_t crb_c; + /* c==0 no ccb defined */ + + uint32_t crb_at; + /* at==0 address type is ignored; + * all addrs effective assumed. 
+ */ + + }; + }; + }; + struct nx_dde_t source_dde; /* byte[16:31] */ + struct nx_dde_t target_dde; /* byte[32:47] */ + struct nx_ccb_t ccb; /* byte[48:63] */ + union { + union nx_qw_t reserved64[3]; /* byte[64:96] */ + }; + struct nx_csb_t csb; +} __aligned(128); + +/* 842 CRB */ + +#define EFT_FC_MASK size_mask(3) +#define EFT_FC_OFFSET 31 +#define EFT_FC_COMPRESS 0x0 +#define EFT_FC_COMPRESS_WITH_CRC 0x1 +#define EFT_FC_DECOMPRESS 0x2 +#define EFT_FC_DECOMPRESS_WITH_CRC 0x3 +#define EFT_FC_BLK_DATA_MOVE 0x4 +#endif /* NX_842 */ + +#endif /* _NXU_H */ -- cgit v1.2.3-59-g8ed1b From 647c734f62f882bb742683cd5f5596f0abadf758 Mon Sep 17 00:00:00 2001 From: Raphael Moreira Zinsly Date: Mon, 20 Apr 2020 17:55:36 -0300 Subject: selftests/powerpc: Add NX-GZIP engine compress testcase Add a compression testcase for the powerpc NX-GZIP engine. Signed-off-by: Bulent Abali Signed-off-by: Raphael Moreira Zinsly Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200420205538.25181-4-rzinsly@linux.ibm.com --- tools/testing/selftests/powerpc/Makefile | 1 + tools/testing/selftests/powerpc/nx-gzip/Makefile | 8 + .../testing/selftests/powerpc/nx-gzip/gzfht_test.c | 433 +++++++++++++++++++++ tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c | 316 +++++++++++++++ .../selftests/powerpc/nx-gzip/nx-gzip-test.sh | 45 +++ 5 files changed, 803 insertions(+) create mode 100644 tools/testing/selftests/powerpc/nx-gzip/Makefile create mode 100644 tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c create mode 100644 tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c create mode 100755 tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile index 644770c3b754..0830e63818c1 100644 --- a/tools/testing/selftests/powerpc/Makefile +++ b/tools/testing/selftests/powerpc/Makefile @@ -19,6 +19,7 @@ SUB_DIRS = alignment \ copyloops \ dscr \ mm \ + nx-gzip \ pmu \ signal \ primitives \ diff --git a/tools/testing/selftests/powerpc/nx-gzip/Makefile b/tools/testing/selftests/powerpc/nx-gzip/Makefile new file mode 100644 index 000000000000..016e528a0a94 --- /dev/null +++ b/tools/testing/selftests/powerpc/nx-gzip/Makefile @@ -0,0 +1,8 @@ +CFLAGS = -O3 -m64 -I./include + +TEST_GEN_FILES := gzfht_test +TEST_PROGS := nx-gzip-test.sh + +include ../../lib.mk + +$(TEST_GEN_FILES): gzip_vas.c diff --git a/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c b/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c new file mode 100644 index 000000000000..7496a83f9c9d --- /dev/null +++ b/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* P9 gzip sample code for demonstrating the P9 NX hardware interface. + * Not intended for productive uses or for performance or compression + * ratio measurements. For simplicity of demonstration, this sample + * code compresses in to fixed Huffman blocks only (Deflate btype=1) + * and has very simple memory management. Dynamic Huffman blocks + * (Deflate btype=2) are more involved as detailed in the user guide. + * Note also that /dev/crypto/gzip, VAS and skiboot support are + * required. + * + * Copyright 2020 IBM Corp. + * + * https://github.com/libnxz/power-gzip for zlib api and other utils + * + * Author: Bulent Abali + * + * Definitions of acronyms used here. 
See + * P9 NX Gzip Accelerator User's Manual for details: + * https://github.com/libnxz/power-gzip/blob/develop/doc/power_nx_gzip_um.pdf + * + * adler/crc: 32 bit checksums appended to stream tail + * ce: completion extension + * cpb: coprocessor parameter block (metadata) + * crb: coprocessor request block (command) + * csb: coprocessor status block (status) + * dht: dynamic huffman table + * dde: data descriptor element (address, length) + * ddl: list of ddes + * dh/fh: dynamic and fixed huffman types + * fc: coprocessor function code + * histlen: history/dictionary length + * history: sliding window of up to 32KB of data + * lzcount: Deflate LZ symbol counts + * rembytecnt: remaining byte count + * sfbt: source final block type; last block's type during decomp + * spbc: source processed byte count + * subc: source unprocessed bit count + * tebc: target ending bit count; valid bits in the last byte + * tpbc: target processed byte count + * vas: virtual accelerator switch; the user mode interface + */ + +#define _ISOC11_SOURCE // For aligned_alloc() +#define _DEFAULT_SOURCE // For endian.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nxu.h" +#include "nx.h" + +int nx_dbg; +FILE *nx_gzip_log; + +#define NX_MIN(X, Y) (((X) < (Y)) ? (X) : (Y)) +#define FNAME_MAX 1024 +#define FEXT ".nx.gz" + +/* + * LZ counts returned in the user supplied nx_gzip_crb_cpb_t structure. + */ +static int compress_fht_sample(char *src, uint32_t srclen, char *dst, + uint32_t dstlen, int with_count, + struct nx_gzip_crb_cpb_t *cmdp, void *handle) +{ + uint32_t fc; + + assert(!!cmdp); + + put32(cmdp->crb, gzip_fc, 0); /* clear */ + fc = (with_count) ? GZIP_FC_COMPRESS_RESUME_FHT_COUNT : + GZIP_FC_COMPRESS_RESUME_FHT; + putnn(cmdp->crb, gzip_fc, fc); + putnn(cmdp->cpb, in_histlen, 0); /* resuming with no history */ + memset((void *) &cmdp->crb.csb, 0, sizeof(cmdp->crb.csb)); + + /* Section 6.6 programming notes; spbc may be in two different + * places depending on FC. + */ + if (!with_count) + put32(cmdp->cpb, out_spbc_comp, 0); + else + put32(cmdp->cpb, out_spbc_comp_with_count, 0); + + /* Figure 6-3 6-4; CSB location */ + put64(cmdp->crb, csb_address, 0); + put64(cmdp->crb, csb_address, + (uint64_t) &cmdp->crb.csb & csb_address_mask); + + /* Source direct dde (scatter-gather list) */ + clear_dde(cmdp->crb.source_dde); + putnn(cmdp->crb.source_dde, dde_count, 0); + put32(cmdp->crb.source_dde, ddebc, srclen); + put64(cmdp->crb.source_dde, ddead, (uint64_t) src); + + /* Target direct dde (scatter-gather list) */ + clear_dde(cmdp->crb.target_dde); + putnn(cmdp->crb.target_dde, dde_count, 0); + put32(cmdp->crb.target_dde, ddebc, dstlen); + put64(cmdp->crb.target_dde, ddead, (uint64_t) dst); + + /* Submit the crb, the job descriptor, to the accelerator */ + return nxu_submit_job(cmdp, handle); +} + +/* + * Prepares a blank no filename no timestamp gzip header and returns + * the number of bytes written to buf. 
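+ * The ten bytes emitted below are, in order (RFC 1952 section 2.3):
+ *	1f 8b		ID1, ID2 (gzip magic)
+ *	08		CM (deflate)
+ *	00		FLG (no name, comment, extra field or crc)
+ *	00 00 00 00	MTIME (none)
+ *	04 03		XFL (fastest), OS (Unix)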
+ * Gzip specification at https://tools.ietf.org/html/rfc1952
+ */
+int gzip_header_blank(char *buf)
+{
+	int i = 0;
+
+	buf[i++] = 0x1f; /* ID1 */
+	buf[i++] = 0x8b; /* ID2 */
+	buf[i++] = 0x08; /* CM  */
+	buf[i++] = 0x00; /* FLG */
+	buf[i++] = 0x00; /* MTIME */
+	buf[i++] = 0x00; /* MTIME */
+	buf[i++] = 0x00; /* MTIME */
+	buf[i++] = 0x00; /* MTIME */
+	buf[i++] = 0x04; /* XFL 4=fastest */
+	buf[i++] = 0x03; /* OS UNIX */
+
+	return i;
+}
+
+/* Caller must free the allocated buffer; returns nonzero on error. */
+int read_alloc_input_file(char *fname, char **buf, size_t *bufsize)
+{
+	struct stat statbuf;
+	FILE *fp;
+	char *p;
+	size_t num_bytes;
+
+	if (stat(fname, &statbuf)) {
+		perror(fname);
+		return(-1);
+	}
+	fp = fopen(fname, "r");
+	if (fp == NULL) {
+		perror(fname);
+		return(-1);
+	}
+	assert(NULL != (p = (char *) malloc(statbuf.st_size)));
+	num_bytes = fread(p, 1, statbuf.st_size, fp);
+	if (ferror(fp) || (num_bytes != statbuf.st_size)) {
+		perror(fname);
+		return(-1);
+	}
+	*buf = p;
+	*bufsize = num_bytes;
+	return 0;
+}
+
+/* Returns nonzero on error */
+int write_output_file(char *fname, char *buf, size_t bufsize)
+{
+	FILE *fp;
+	size_t num_bytes;
+
+	fp = fopen(fname, "w");
+	if (fp == NULL) {
+		perror(fname);
+		return(-1);
+	}
+	num_bytes = fwrite(buf, 1, bufsize, fp);
+	if (ferror(fp) || (num_bytes != bufsize)) {
+		perror(fname);
+		return(-1);
+	}
+	fclose(fp);
+	return 0;
+}
+
+/*
+ * Z_SYNC_FLUSH as described in zlib.h.
+ * Returns number of appended bytes
+ */
+int append_sync_flush(char *buf, int tebc, int final)
+{
+	uint64_t flush;
+	int shift = (tebc & 0x7);
+
+	if (tebc > 0) {
+		/* Last byte is partially full */
+		buf = buf - 1;
+		*buf = *buf & (unsigned char) ((1<<tebc)-1);
+	} else
+		*buf = 0;
+	flush = ((0x1ULL & final) << shift) | *buf;
+	shift = shift + 3; /* BFINAL and BTYPE written */
+	shift = (shift <= 8) ? 8 : 16;
+	flush |= (0xFFFF0000ULL) << shift; /* Zero length block */
+	shift = shift + 32;
+	while (shift > 0) {
+		*buf++ = (unsigned char) (flush & 0xffULL);
+		flush = flush >> 8;
+		shift = shift - 8;
+	}
+	return(((tebc > 5) || (tebc == 0)) ? 5 : 4);
+}
+
+/*
+ * Final deflate block bit. This call assumes the block
+ * beginning is byte aligned.
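+ * (BFINAL is bit 0 of the first byte of a deflate block header, RFC
+ * 1951 section 3.2.3, which is why flipping a single bit in the
+ * byte-aligned block head is sufficient.)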
+ */ +static void set_bfinal(void *buf, int bfinal) +{ + char *b = buf; + + if (bfinal) + *b = *b | (unsigned char) 0x01; + else + *b = *b & (unsigned char) 0xfe; +} + +int compress_file(int argc, char **argv, void *handle) +{ + char *inbuf, *outbuf, *srcbuf, *dstbuf; + char outname[FNAME_MAX]; + uint32_t srclen, dstlen; + uint32_t flushlen, chunk; + size_t inlen, outlen, dsttotlen, srctotlen; + uint32_t crc, spbc, tpbc, tebc; + int lzcounts = 0; + int cc; + int num_hdr_bytes; + struct nx_gzip_crb_cpb_t *cmdp; + uint32_t pagelen = 65536; + int fault_tries = NX_MAX_FAULTS; + + cmdp = (void *)(uintptr_t) + aligned_alloc(sizeof(struct nx_gzip_crb_cpb_t), + sizeof(struct nx_gzip_crb_cpb_t)); + + if (argc != 2) { + fprintf(stderr, "usage: %s \n", argv[0]); + exit(-1); + } + if (read_alloc_input_file(argv[1], &inbuf, &inlen)) + exit(-1); + fprintf(stderr, "file %s read, %ld bytes\n", argv[1], inlen); + + /* Generous output buffer for header/trailer */ + outlen = 2 * inlen + 1024; + + assert(NULL != (outbuf = (char *)malloc(outlen))); + nxu_touch_pages(outbuf, outlen, pagelen, 1); + + /* Compress piecemeal in smallish chunks */ + chunk = 1<<22; + + /* Write the gzip header to the stream */ + num_hdr_bytes = gzip_header_blank(outbuf); + dstbuf = outbuf + num_hdr_bytes; + outlen = outlen - num_hdr_bytes; + dsttotlen = num_hdr_bytes; + + srcbuf = inbuf; + srctotlen = 0; + + /* Init the CRB, the coprocessor request block */ + memset(&cmdp->crb, 0, sizeof(cmdp->crb)); + + /* Initial gzip crc32 */ + put32(cmdp->cpb, in_crc, 0); + + while (inlen > 0) { + + /* Submit chunk size source data per job */ + srclen = NX_MIN(chunk, inlen); + /* Supply large target in case data expands */ + dstlen = NX_MIN(2*srclen, outlen); + + /* Page faults are handled by the user code */ + + /* Fault-in pages; an improved code wouldn't touch so + * many pages but would try to estimate the + * compression ratio and adjust both the src and dst + * touch amounts. + */ + nxu_touch_pages(cmdp, sizeof(struct nx_gzip_crb_cpb_t), pagelen, + 1); + nxu_touch_pages(srcbuf, srclen, pagelen, 0); + nxu_touch_pages(dstbuf, dstlen, pagelen, 1); + + cc = compress_fht_sample( + srcbuf, srclen, + dstbuf, dstlen, + lzcounts, cmdp, handle); + + if (cc != ERR_NX_OK && cc != ERR_NX_TPBC_GT_SPBC && + cc != ERR_NX_TRANSLATION) { + fprintf(stderr, "nx error: cc= %d\n", cc); + exit(-1); + } + + /* Page faults are handled by the user code */ + if (cc == ERR_NX_TRANSLATION) { + NXPRT(fprintf(stderr, "page fault: cc= %d, ", cc)); + NXPRT(fprintf(stderr, "try= %d, fsa= %08llx\n", + fault_tries, + (unsigned long long) cmdp->crb.csb.fsaddr)); + fault_tries--; + if (fault_tries > 0) { + continue; + } else { + fprintf(stderr, "error: cannot progress; "); + fprintf(stderr, "too many faults\n"); + exit(-1); + }; + } + + fault_tries = NX_MAX_FAULTS; /* Reset for the next chunk */ + + inlen = inlen - srclen; + srcbuf = srcbuf + srclen; + srctotlen = srctotlen + srclen; + + /* Two possible locations for spbc depending on the function + * code. + */ + spbc = (!lzcounts) ? 
get32(cmdp->cpb, out_spbc_comp) :
+			get32(cmdp->cpb, out_spbc_comp_with_count);
+		assert(spbc == srclen);
+
+		/* Target byte count */
+		tpbc = get32(cmdp->crb.csb, tpbc);
+		/* Target ending bit count */
+		tebc = getnn(cmdp->cpb, out_tebc);
+		NXPRT(fprintf(stderr, "compressed chunk %d ", spbc));
+		NXPRT(fprintf(stderr, "to %d bytes, tebc= %d\n", tpbc, tebc));
+
+		if (inlen > 0) { /* More chunks to go */
+			set_bfinal(dstbuf, 0);
+			dstbuf = dstbuf + tpbc;
+			dsttotlen = dsttotlen + tpbc;
+			outlen = outlen - tpbc;
+			/* Round up to the next byte with a flush
+			 * block; do not set the BFINAL bit.
+			 */
+			flushlen = append_sync_flush(dstbuf, tebc, 0);
+			dsttotlen = dsttotlen + flushlen;
+			outlen = outlen - flushlen;
+			dstbuf = dstbuf + flushlen;
+			NXPRT(fprintf(stderr, "added sync_flush %d bytes\n",
+					flushlen));
+		} else { /* Done */
+			/* Set the BFINAL bit of the last block per Deflate
+			 * specification.
+			 */
+			set_bfinal(dstbuf, 1);
+			dstbuf = dstbuf + tpbc;
+			dsttotlen = dsttotlen + tpbc;
+			outlen = outlen - tpbc;
+		}
+
+		/* Resuming crc32 for the next chunk */
+		crc = get32(cmdp->cpb, out_crc);
+		put32(cmdp->cpb, in_crc, crc);
+		crc = be32toh(crc);
+	}
+
+	/* Append crc32 and ISIZE to the end */
+	memcpy(dstbuf, &crc, 4);
+	memcpy(dstbuf+4, &srctotlen, 4);
+	dsttotlen = dsttotlen + 8;
+	outlen = outlen - 8;
+
+	assert(FNAME_MAX > (strlen(argv[1]) + strlen(FEXT)));
+	strcpy(outname, argv[1]);
+	strcat(outname, FEXT);
+	if (write_output_file(outname, outbuf, dsttotlen)) {
+		fprintf(stderr, "write error: %s\n", outname);
+		exit(-1);
+	}
+
+	fprintf(stderr, "compressed %ld to %ld bytes total, ", srctotlen,
+		dsttotlen);
+	fprintf(stderr, "crc32 checksum = %08x\n", crc);
+
+	if (inbuf != NULL)
+		free(inbuf);
+
+	if (outbuf != NULL)
+		free(outbuf);
+
+	return 0;
+}
+
+int main(int argc, char **argv)
+{
+	int rc;
+	struct sigaction act;
+	void *handle;
+
+	nx_dbg = 0;
+	nx_gzip_log = NULL;
+	act.sa_handler = 0;
+	act.sa_sigaction = nxu_sigsegv_handler;
+	act.sa_flags = SA_SIGINFO;
+	act.sa_restorer = 0;
+	sigemptyset(&act.sa_mask);
+	sigaction(SIGSEGV, &act, NULL);
+
+	handle = nx_function_begin(NX_FUNC_COMP_GZIP, 0);
+	if (!handle) {
+		fprintf(stderr, "Unable to init NX, errno %d\n", errno);
+		exit(-1);
+	}
+
+	rc = compress_file(argc, argv, handle);
+
+	nx_function_end(handle);
+
+	return rc;
+}
diff --git a/tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c b/tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c
new file mode 100644
index 000000000000..c055885da40a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ * Copyright 2020 IBM Corp.
+ * + * Author: Bulent Abali + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "vas-api.h" +#include "nx.h" +#include "copy-paste.h" +#include "nxu.h" +#include "nx_dbg.h" +#include + +#define barrier() +#define hwsync() ({ asm volatile("sync" ::: "memory"); }) + +#ifndef NX_NO_CPU_PRI +#define cpu_pri_default() ({ asm volatile ("or 2, 2, 2"); }) +#define cpu_pri_low() ({ asm volatile ("or 31, 31, 31"); }) +#else +#define cpu_pri_default() +#define cpu_pri_low() +#endif + +void *nx_fault_storage_address; + +struct nx_handle { + int fd; + int function; + void *paste_addr; +}; + +static int open_device_nodes(char *devname, int pri, struct nx_handle *handle) +{ + int rc, fd; + void *addr; + struct vas_tx_win_open_attr txattr; + + fd = open(devname, O_RDWR); + if (fd < 0) { + fprintf(stderr, " open device name %s\n", devname); + return -errno; + } + + memset(&txattr, 0, sizeof(txattr)); + txattr.version = 1; + txattr.vas_id = pri; + rc = ioctl(fd, VAS_TX_WIN_OPEN, (unsigned long)&txattr); + if (rc < 0) { + fprintf(stderr, "ioctl() n %d, error %d\n", rc, errno); + rc = -errno; + goto out; + } + + addr = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0ULL); + if (addr == MAP_FAILED) { + fprintf(stderr, "mmap() failed, errno %d\n", errno); + rc = -errno; + goto out; + } + handle->fd = fd; + handle->paste_addr = (void *)((char *)addr + 0x400); + + rc = 0; +out: + close(fd); + return rc; +} + +void *nx_function_begin(int function, int pri) +{ + int rc; + char *devname = "/dev/crypto/nx-gzip"; + struct nx_handle *nxhandle; + + if (function != NX_FUNC_COMP_GZIP) { + errno = EINVAL; + fprintf(stderr, " NX_FUNC_COMP_GZIP not found\n"); + return NULL; + } + + + nxhandle = malloc(sizeof(*nxhandle)); + if (!nxhandle) { + errno = ENOMEM; + fprintf(stderr, " No memory\n"); + return NULL; + } + + nxhandle->function = function; + rc = open_device_nodes(devname, pri, nxhandle); + if (rc < 0) { + errno = -rc; + fprintf(stderr, " open_device_nodes failed\n"); + return NULL; + } + + return nxhandle; +} + +int nx_function_end(void *handle) +{ + int rc = 0; + struct nx_handle *nxhandle = handle; + + rc = munmap(nxhandle->paste_addr - 0x400, 4096); + if (rc < 0) { + fprintf(stderr, "munmap() failed, errno %d\n", errno); + return rc; + } + close(nxhandle->fd); + free(nxhandle); + + return rc; +} + +static int nx_wait_for_csb(struct nx_gzip_crb_cpb_t *cmdp) +{ + long poll = 0; + uint64_t t; + + /* Save power and let other threads use the h/w. top may show + * 100% but only because OS doesn't know we slowed the this + * h/w thread while polling. We're letting other threads have + * higher throughput on the core. + */ + cpu_pri_low(); + +#define CSB_MAX_POLL 200000000UL +#define USLEEP_TH 300000UL + + t = __ppc_get_timebase(); + + while (getnn(cmdp->crb.csb, csb_v) == 0) { + ++poll; + hwsync(); + + cpu_pri_low(); + + /* usleep(0) takes around 29000 ticks ~60 us. + * 300000 is spinning for about 600 us then + * start sleeping. + */ + if ((__ppc_get_timebase() - t) > USLEEP_TH) { + cpu_pri_default(); + usleep(1); + } + + if (poll > CSB_MAX_POLL) + break; + + /* Fault address from signal handler */ + if (nx_fault_storage_address) { + cpu_pri_default(); + return -EAGAIN; + } + + } + + cpu_pri_default(); + + /* hw has updated csb and output buffer */ + hwsync(); + + /* Check CSB flags. 
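	 * csb.v is the last field the accelerator writes, so once it
	 * reads as 1 the cc/ce/tpbc fields are stable; the caller then
	 * typically does, e.g., cc = getnn(cmdp->crb.csb, csb_cc);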
*/ + if (getnn(cmdp->crb.csb, csb_v) == 0) { + fprintf(stderr, "CSB still not valid after %d polls.\n", + (int) poll); + prt_err("CSB still not valid after %d polls, giving up.\n", + (int) poll); + return -ETIMEDOUT; + } + + return 0; +} + +static int nxu_run_job(struct nx_gzip_crb_cpb_t *cmdp, void *handle) +{ + int i, ret, retries; + struct nx_handle *nxhandle = handle; + + assert(handle != NULL); + i = 0; + retries = 5000; + while (i++ < retries) { + hwsync(); + vas_copy(&cmdp->crb, 0); + ret = vas_paste(nxhandle->paste_addr, 0); + hwsync(); + + NXPRT(fprintf(stderr, "Paste attempt %d/%d returns 0x%x\n", + i, retries, ret)); + + if ((ret == 2) || (ret == 3)) { + + ret = nx_wait_for_csb(cmdp); + if (!ret) { + goto out; + } else if (ret == -EAGAIN) { + long x; + + prt_err("Touching address %p, 0x%lx\n", + nx_fault_storage_address, + *(long *) nx_fault_storage_address); + x = *(long *) nx_fault_storage_address; + *(long *) nx_fault_storage_address = x; + nx_fault_storage_address = 0; + continue; + } else { + prt_err("wait_for_csb() returns %d\n", ret); + break; + } + } else { + if (i < 10) { + /* spin for few ticks */ +#define SPIN_TH 500UL + uint64_t fail_spin; + + fail_spin = __ppc_get_timebase(); + while ((__ppc_get_timebase() - fail_spin) < + SPIN_TH) + ; + } else { + /* sleep */ + unsigned int pr = 0; + + if (pr++ % 100 == 0) { + prt_err("Paste attempt %d/", i); + prt_err("%d, failed pid= %d\n", retries, + getpid()); + } + usleep(1); + } + continue; + } + } + +out: + cpu_pri_default(); + + return ret; +} + +int nxu_submit_job(struct nx_gzip_crb_cpb_t *cmdp, void *handle) +{ + int cc; + + cc = nxu_run_job(cmdp, handle); + + if (!cc) + cc = getnn(cmdp->crb.csb, csb_cc); /* CC Table 6-8 */ + + return cc; +} + + +void nxu_sigsegv_handler(int sig, siginfo_t *info, void *ctx) +{ + fprintf(stderr, "%d: Got signal %d si_code %d, si_addr %p\n", getpid(), + sig, info->si_code, info->si_addr); + + nx_fault_storage_address = info->si_addr; +} + +/* + * Fault in pages prior to NX job submission. wr=1 may be required to + * touch writeable pages. System zero pages do not fault-in the page as + * intended. Typically set wr=1 for NX target pages and set wr=0 for NX + * source pages. + */ +int nxu_touch_pages(void *buf, long buf_len, long page_len, int wr) +{ + char *begin = buf; + char *end = (char *) buf + buf_len - 1; + volatile char t; + + assert(buf_len >= 0 && !!buf); + + NXPRT(fprintf(stderr, "touch %p %p len 0x%lx wr=%d\n", buf, + (buf + buf_len), buf_len, wr)); + + if (buf_len <= 0 || buf == NULL) + return -1; + + do { + t = *begin; + if (wr) + *begin = t; + begin = begin + page_len; + } while (begin < end); + + /* When buf_sz is small or buf tail is in another page */ + t = *end; + if (wr) + *end = t; + + return 0; +} diff --git a/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh b/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh new file mode 100755 index 000000000000..7cc7256ba1c7 --- /dev/null +++ b/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-or-later + +if [[ ! -w /dev/crypto/nx-gzip ]]; then + echo "Can't access /dev/crypto/nx-gzip, skipping" + echo "skip: $0" + exit 4 +fi + +set -e + +function cleanup +{ + rm -f nx-tempfile* +} + +trap cleanup EXIT + +function test_sizes +{ + local n=$1 + local fname="nx-tempfile.$n" + + for size in 4K 64K 1M 64M + do + echo "Testing $size ($n) ..." 
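+		# /dev/urandom data is essentially incompressible, so this
+		# mainly exercises job submission and page-fault handling
+		# rather than ratio; gzfht_test writes $fname.nx.gz.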
+ dd if=/dev/urandom of=$fname bs=$size count=1 + ./gzfht_test $fname + done +} + +echo "Doing basic test of different sizes ..." +test_sizes 0 + +echo "Running tests in parallel ..." +for i in {1..16} +do + test_sizes $i & +done + +wait + +echo "OK" + +exit 0 -- cgit v1.2.3-59-g8ed1b From 841fb73ad2195ac7d79ce970fa3d7ed7a5bb0ecd Mon Sep 17 00:00:00 2001 From: Raphael Moreira Zinsly Date: Mon, 20 Apr 2020 17:55:37 -0300 Subject: selftests/powerpc: Add NX-GZIP engine decompress testcase Include a decompression testcase for the powerpc NX-GZIP engine. Signed-off-by: Bulent Abali Signed-off-by: Raphael Moreira Zinsly Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200420205538.25181-5-rzinsly@linux.ibm.com --- tools/testing/selftests/powerpc/nx-gzip/Makefile | 2 +- .../testing/selftests/powerpc/nx-gzip/gunz_test.c | 1028 ++++++++++++++++++++ .../selftests/powerpc/nx-gzip/nx-gzip-test.sh | 1 + 3 files changed, 1030 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/nx-gzip/gunz_test.c diff --git a/tools/testing/selftests/powerpc/nx-gzip/Makefile b/tools/testing/selftests/powerpc/nx-gzip/Makefile index 016e528a0a94..640fad6cc2c7 100644 --- a/tools/testing/selftests/powerpc/nx-gzip/Makefile +++ b/tools/testing/selftests/powerpc/nx-gzip/Makefile @@ -1,6 +1,6 @@ CFLAGS = -O3 -m64 -I./include -TEST_GEN_FILES := gzfht_test +TEST_GEN_FILES := gzfht_test gunz_test TEST_PROGS := nx-gzip-test.sh include ../../lib.mk diff --git a/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c b/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c new file mode 100644 index 000000000000..6ee0fded0391 --- /dev/null +++ b/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c @@ -0,0 +1,1028 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* P9 gunzip sample code for demonstrating the P9 NX hardware + * interface. Not intended for productive uses or for performance or + * compression ratio measurements. Note also that /dev/crypto/gzip, + * VAS and skiboot support are required + * + * Copyright 2020 IBM Corp. + * + * Author: Bulent Abali + * + * https://github.com/libnxz/power-gzip for zlib api and other utils + * Definitions of acronyms used here. 
See + * P9 NX Gzip Accelerator User's Manual for details: + * https://github.com/libnxz/power-gzip/blob/develop/doc/power_nx_gzip_um.pdf + * + * adler/crc: 32 bit checksums appended to stream tail + * ce: completion extension + * cpb: coprocessor parameter block (metadata) + * crb: coprocessor request block (command) + * csb: coprocessor status block (status) + * dht: dynamic huffman table + * dde: data descriptor element (address, length) + * ddl: list of ddes + * dh/fh: dynamic and fixed huffman types + * fc: coprocessor function code + * histlen: history/dictionary length + * history: sliding window of up to 32KB of data + * lzcount: Deflate LZ symbol counts + * rembytecnt: remaining byte count + * sfbt: source final block type; last block's type during decomp + * spbc: source processed byte count + * subc: source unprocessed bit count + * tebc: target ending bit count; valid bits in the last byte + * tpbc: target processed byte count + * vas: virtual accelerator switch; the user mode interface + */ + +#define _ISOC11_SOURCE // For aligned_alloc() +#define _DEFAULT_SOURCE // For endian.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nxu.h" +#include "nx.h" +#include "crb.h" + +int nx_dbg; +FILE *nx_gzip_log; + +#define NX_MIN(X, Y) (((X) < (Y))?(X):(Y)) +#define NX_MAX(X, Y) (((X) > (Y))?(X):(Y)) + +#define GETINPC(X) fgetc(X) +#define FNAME_MAX 1024 + +/* fifo queue management */ +#define fifo_used_bytes(used) (used) +#define fifo_free_bytes(used, len) ((len)-(used)) +/* amount of free bytes in the first and last parts */ +#define fifo_free_first_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \ + ? (len)-((cur)+(used)) : 0) +#define fifo_free_last_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \ + ? (cur) : (len)-(used)) +/* amount of used bytes in the first and last parts */ +#define fifo_used_first_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \ + ? (used) : (len)-(cur)) +#define fifo_used_last_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \ + ? 0 : ((used)+(cur))-(len)) +/* first and last free parts start here */ +#define fifo_free_first_offset(cur, used) ((cur)+(used)) +#define fifo_free_last_offset(cur, used, len) \ + fifo_used_last_bytes(cur, used, len) +/* first and last used parts start here */ +#define fifo_used_first_offset(cur) (cur) +#define fifo_used_last_offset(cur) (0) + +const int fifo_in_len = 1<<24; +const int fifo_out_len = 1<<24; +const int page_sz = 1<<16; +const int line_sz = 1<<7; +const int window_max = 1<<15; + +/* + * Adds an (address, len) pair to the list of ddes (ddl) and updates + * the base dde. ddl[0] is the only dde in a direct dde which + * contains a single (addr,len) pair. For more pairs, ddl[0] becomes + * the indirect (base) dde that points to a list of direct ddes. + * See Section 6.4 of the NX-gzip user manual for DDE description. + * Addr=NULL, len=0 clears the ddl[0]. Returns the total number of + * bytes in ddl. Caller is responsible for allocting the array of + * nx_dde_t *ddl. If N addresses are required in the scatter-gather + * list, the ddl array must have N+1 entries minimum. 
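+ *
+ * Typical use, mirroring decompress_file() below (a sketch; buf1, buf2
+ * and their lengths are hypothetical):
+ *
+ *	struct nx_dde_t ddl[6] __aligned(128);
+ *
+ *	clearp_dde(ddl);
+ *	nx_append_dde(ddl, buf1, len1);	// ddl[0] is a direct dde
+ *	nx_append_dde(ddl, buf2, len2);	// ddl[0] becomes indirect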
+ */
+static inline uint32_t nx_append_dde(struct nx_dde_t *ddl, void *addr,
+					uint32_t len)
+{
+	uint32_t ddecnt;
+	uint32_t bytes;
+
+	if (addr == NULL && len == 0) {
+		clearp_dde(ddl);
+		return 0;
+	}
+
+	NXPRT(fprintf(stderr, "%d: %s addr %p len %x\n", __LINE__, __func__,
+		addr, len));
+
+	/* Number of ddes in the dde list ; == 0 when it is a direct dde */
+	ddecnt = getpnn(ddl, dde_count);
+	bytes = getp32(ddl, ddebc);
+
+	if (ddecnt == 0 && bytes == 0) {
+		/* First dde is unused; make it a direct dde */
+		bytes = len;
+		putp32(ddl, ddebc, bytes);
+		putp64(ddl, ddead, (uint64_t) addr);
+	} else if (ddecnt == 0) {
+		/* Converting direct to indirect dde
+		 * ddl[0] becomes head dde of ddl
+		 * copy direct to indirect first.
+		 */
+		ddl[1] = ddl[0];
+
+		/* Add the new dde next */
+		clear_dde(ddl[2]);
+		put32(ddl[2], ddebc, len);
+		put64(ddl[2], ddead, (uint64_t) addr);
+
+		/* Ddl head points to 2 direct ddes */
+		ddecnt = 2;
+		putpnn(ddl, dde_count, ddecnt);
+		bytes = bytes + len;
+		putp32(ddl, ddebc, bytes);
+		/* Pointer to the first direct dde */
+		putp64(ddl, ddead, (uint64_t) &ddl[1]);
+	} else {
+		/* Append a dde to an existing indirect ddl */
+		++ddecnt;
+		clear_dde(ddl[ddecnt]);
+		put64(ddl[ddecnt], ddead, (uint64_t) addr);
+		put32(ddl[ddecnt], ddebc, len);
+
+		putpnn(ddl, dde_count, ddecnt);
+		bytes = bytes + len;
+		putp32(ddl, ddebc, bytes); /* byte sum of all dde */
+	}
+	return bytes;
+}
+
+/*
+ * Touch specified number of pages represented in number bytes
+ * beginning from the first buffer in a dde list.
+ * Do not touch the pages past buf_sz-th byte's page.
+ *
+ * Set buf_sz = 0 to touch all pages described by the ddep.
+ */
+static int nx_touch_pages_dde(struct nx_dde_t *ddep, long buf_sz, long page_sz,
+				int wr)
+{
+	uint32_t indirect_count;
+	uint32_t buf_len;
+	long total;
+	uint64_t buf_addr;
+	struct nx_dde_t *dde_list;
+	int i;
+
+	assert(!!ddep);
+
+	indirect_count = getpnn(ddep, dde_count);
+
+	NXPRT(fprintf(stderr, "%s dde_count %d request len ", __func__,
+		indirect_count));
+	NXPRT(fprintf(stderr, "0x%lx\n", buf_sz));
+
+	if (indirect_count == 0) {
+		/* Direct dde */
+		buf_len = getp32(ddep, ddebc);
+		buf_addr = getp64(ddep, ddead);
+
+		NXPRT(fprintf(stderr, "touch direct ddebc 0x%x ddead %p\n",
+			buf_len, (void *)buf_addr));
+
+		if (buf_sz == 0)
+			nxu_touch_pages((void *)buf_addr, buf_len, page_sz, wr);
+		else
+			nxu_touch_pages((void *)buf_addr, NX_MIN(buf_len,
+				buf_sz), page_sz, wr);
+
+		return ERR_NX_OK;
+	}
+
+	/* Indirect dde */
+	if (indirect_count > MAX_DDE_COUNT)
+		return ERR_NX_EXCESSIVE_DDE;
+
+	/* First address of the list */
+	dde_list = (struct nx_dde_t *) getp64(ddep, ddead);
+
+	if (buf_sz == 0)
+		buf_sz = getp32(ddep, ddebc);
+
+	total = 0;
+	for (i = 0; i < indirect_count; i++) {
+		buf_len = get32(dde_list[i], ddebc);
+		buf_addr = get64(dde_list[i], ddead);
+		total += buf_len;
+
+		NXPRT(fprintf(stderr, "touch loop len 0x%x ddead %p total ",
+			buf_len, (void *)buf_addr));
+		NXPRT(fprintf(stderr, "0x%lx\n", total));
+
+		/* Touching fewer pages than encoded in the ddebc */
+		if (total > buf_sz) {
+			buf_len = NX_MIN(buf_len, total - buf_sz);
+			nxu_touch_pages((void *)buf_addr, buf_len, page_sz, wr);
+			NXPRT(fprintf(stderr, "touch loop break len 0x%x ",
+				buf_len));
+			NXPRT(fprintf(stderr, "ddead %p\n", (void *)buf_addr));
+			break;
+		}
+		nxu_touch_pages((void *)buf_addr, buf_len, page_sz, wr);
+	}
+	return ERR_NX_OK;
+}
+
+/*
+ * Src and dst buffers are supplied in scatter gather lists.
+ * NX function code and other parameters supplied in cmdp.
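+ *
+ * A typical call, as used further below (sketch):
+ *
+ *	putnn(cmdp->crb, gzip_fc, GZIP_FC_DECOMPRESS);
+ *	cc = nx_submit_job(ddl_in, ddl_out, cmdp, devhandle);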
+ */ +static int nx_submit_job(struct nx_dde_t *src, struct nx_dde_t *dst, + struct nx_gzip_crb_cpb_t *cmdp, void *handle) +{ + uint64_t csbaddr; + + memset((void *)&cmdp->crb.csb, 0, sizeof(cmdp->crb.csb)); + + cmdp->crb.source_dde = *src; + cmdp->crb.target_dde = *dst; + + /* Status, output byte count in tpbc */ + csbaddr = ((uint64_t) &cmdp->crb.csb) & csb_address_mask; + put64(cmdp->crb, csb_address, csbaddr); + + /* NX reports input bytes in spbc; cleared */ + cmdp->cpb.out_spbc_comp_wrap = 0; + cmdp->cpb.out_spbc_comp_with_count = 0; + cmdp->cpb.out_spbc_decomp = 0; + + /* Clear output */ + put32(cmdp->cpb, out_crc, INIT_CRC); + put32(cmdp->cpb, out_adler, INIT_ADLER); + + /* Submit the crb, the job descriptor, to the accelerator. */ + return nxu_submit_job(cmdp, handle); +} + +int decompress_file(int argc, char **argv, void *devhandle) +{ + FILE *inpf = NULL; + FILE *outf = NULL; + + int c, expect, i, cc, rc = 0; + char gzfname[FNAME_MAX]; + + /* Queuing, file ops, byte counting */ + char *fifo_in, *fifo_out; + int used_in, cur_in, used_out, cur_out, read_sz, n; + int first_free, last_free, first_used, last_used; + int first_offset, last_offset; + int write_sz, free_space, source_sz; + int source_sz_estimate, target_sz_estimate; + uint64_t last_comp_ratio = 0; /* 1000 max */ + uint64_t total_out = 0; + int is_final, is_eof; + + /* nx hardware */ + int sfbt, subc, spbc, tpbc, nx_ce, fc, resuming = 0; + int history_len = 0; + struct nx_gzip_crb_cpb_t cmd, *cmdp; + struct nx_dde_t *ddl_in; + struct nx_dde_t dde_in[6] __aligned(128); + struct nx_dde_t *ddl_out; + struct nx_dde_t dde_out[6] __aligned(128); + int pgfault_retries; + + /* when using mmap'ed files */ + off_t input_file_offset; + + if (argc > 2) { + fprintf(stderr, "usage: %s or stdin\n", argv[0]); + fprintf(stderr, " writes to stdout or .nx.gunzip\n"); + return -1; + } + + if (argc == 1) { + inpf = stdin; + outf = stdout; + } else if (argc == 2) { + char w[1024]; + char *wp; + + inpf = fopen(argv[1], "r"); + if (inpf == NULL) { + perror(argv[1]); + return -1; + } + + /* Make a new file name to write to. Ignoring '.gz' */ + wp = (NULL != (wp = strrchr(argv[1], '/'))) ? (wp+1) : argv[1]; + strcpy(w, wp); + strcat(w, ".nx.gunzip"); + + outf = fopen(w, "w"); + if (outf == NULL) { + perror(w); + return -1; + } + } + + /* Decode the gzip header */ + c = GETINPC(inpf); expect = 0x1f; /* ID1 */ + if (c != expect) + goto err1; + + c = GETINPC(inpf); expect = 0x8b; /* ID2 */ + if (c != expect) + goto err1; + + c = GETINPC(inpf); expect = 0x08; /* CM */ + if (c != expect) + goto err1; + + int flg = GETINPC(inpf); /* FLG */ + + if (flg & 0xE0 || flg & 0x4 || flg == EOF) + goto err2; + + fprintf(stderr, "gzHeader FLG %x\n", flg); + + /* Read 6 bytes; ignoring the MTIME, XFL, OS fields in this + * sample code. 
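	 * (Per RFC 1952, MTIME is 4 bytes and XFL and OS are 1 byte each,
	 * which accounts for the 6 bytes skipped here.)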
+ */ + for (i = 0; i < 6; i++) { + char tmp[10]; + + tmp[i] = GETINPC(inpf); + if (tmp[i] == EOF) + goto err3; + fprintf(stderr, "%02x ", tmp[i]); + if (i == 5) + fprintf(stderr, "\n"); + } + fprintf(stderr, "gzHeader MTIME, XFL, OS ignored\n"); + + /* FNAME */ + if (flg & 0x8) { + int k = 0; + + do { + c = GETINPC(inpf); + if (c == EOF || k >= FNAME_MAX) + goto err3; + gzfname[k++] = c; + } while (c); + fprintf(stderr, "gzHeader FNAME: %s\n", gzfname); + } + + /* FHCRC */ + if (flg & 0x2) { + c = GETINPC(inpf); + if (c == EOF) + goto err3; + c = GETINPC(inpf); + if (c == EOF) + goto err3; + fprintf(stderr, "gzHeader FHCRC: ignored\n"); + } + + used_in = cur_in = used_out = cur_out = 0; + is_final = is_eof = 0; + + /* Allocate one page larger to prevent page faults due to NX + * overfetching. + * Either do this (char*)(uintptr_t)aligned_alloc or use + * -std=c11 flag to make the int-to-pointer warning go away. + */ + assert((fifo_in = (char *)(uintptr_t)aligned_alloc(line_sz, + fifo_in_len + page_sz)) != NULL); + assert((fifo_out = (char *)(uintptr_t)aligned_alloc(line_sz, + fifo_out_len + page_sz + line_sz)) != NULL); + /* Leave unused space due to history rounding rules */ + fifo_out = fifo_out + line_sz; + nxu_touch_pages(fifo_out, fifo_out_len, page_sz, 1); + + ddl_in = &dde_in[0]; + ddl_out = &dde_out[0]; + cmdp = &cmd; + memset(&cmdp->crb, 0, sizeof(cmdp->crb)); + +read_state: + + /* Read from .gz file */ + + NXPRT(fprintf(stderr, "read_state:\n")); + + if (is_eof != 0) + goto write_state; + + /* We read in to fifo_in in two steps: first: read in to from + * cur_in to the end of the buffer. last: if free space wrapped + * around, read from fifo_in offset 0 to offset cur_in. + */ + + /* Reset fifo head to reduce unnecessary wrap arounds */ + cur_in = (used_in == 0) ? 0 : cur_in; + + /* Free space total is reduced by a gap */ + free_space = NX_MAX(0, fifo_free_bytes(used_in, fifo_in_len) + - line_sz); + + /* Free space may wrap around as first and last */ + first_free = fifo_free_first_bytes(cur_in, used_in, fifo_in_len); + last_free = fifo_free_last_bytes(cur_in, used_in, fifo_in_len); + + /* Start offsets of the free memory */ + first_offset = fifo_free_first_offset(cur_in, used_in); + last_offset = fifo_free_last_offset(cur_in, used_in, fifo_in_len); + + /* Reduce read_sz because of the line_sz gap */ + read_sz = NX_MIN(free_space, first_free); + n = 0; + if (read_sz > 0) { + /* Read in to offset cur_in + used_in */ + n = fread(fifo_in + first_offset, 1, read_sz, inpf); + used_in = used_in + n; + free_space = free_space - n; + assert(n <= read_sz); + if (n != read_sz) { + /* Either EOF or error; exit the read loop */ + is_eof = 1; + goto write_state; + } + } + + /* If free space wrapped around */ + if (last_free > 0) { + /* Reduce read_sz because of the line_sz gap */ + read_sz = NX_MIN(free_space, last_free); + n = 0; + if (read_sz > 0) { + n = fread(fifo_in + last_offset, 1, read_sz, inpf); + used_in = used_in + n; /* Increase used space */ + free_space = free_space - n; /* Decrease free space */ + assert(n <= read_sz); + if (n != read_sz) { + /* Either EOF or error; exit the read loop */ + is_eof = 1; + goto write_state; + } + } + } + + /* At this point we have used_in bytes in fifo_in with the + * data head starting at cur_in and possibly wrapping around. 
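	 * For example, with fifo_in_len = 16, cur_in = 12 and used_in = 8,
	 * the data occupies [12..15] and [0..3]: fifo_used_first_bytes()
	 * and fifo_used_last_bytes() are 4 each (a small worked example;
	 * the real fifo_in_len is 1<<24).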
+ */ + +write_state: + + /* Write decompressed data to output file */ + + NXPRT(fprintf(stderr, "write_state:\n")); + + if (used_out == 0) + goto decomp_state; + + /* If fifo_out has data waiting, write it out to the file to + * make free target space for the accelerator used bytes in + * the first and last parts of fifo_out. + */ + + first_used = fifo_used_first_bytes(cur_out, used_out, fifo_out_len); + last_used = fifo_used_last_bytes(cur_out, used_out, fifo_out_len); + + write_sz = first_used; + + n = 0; + if (write_sz > 0) { + n = fwrite(fifo_out + cur_out, 1, write_sz, outf); + used_out = used_out - n; + /* Move head of the fifo */ + cur_out = (cur_out + n) % fifo_out_len; + assert(n <= write_sz); + if (n != write_sz) { + fprintf(stderr, "error: write\n"); + rc = -1; + goto err5; + } + } + + if (last_used > 0) { /* If more data available in the last part */ + write_sz = last_used; /* Keep it here for later */ + n = 0; + if (write_sz > 0) { + n = fwrite(fifo_out, 1, write_sz, outf); + used_out = used_out - n; + cur_out = (cur_out + n) % fifo_out_len; + assert(n <= write_sz); + if (n != write_sz) { + fprintf(stderr, "error: write\n"); + rc = -1; + goto err5; + } + } + } + +decomp_state: + + /* NX decompresses input data */ + + NXPRT(fprintf(stderr, "decomp_state:\n")); + + if (is_final) + goto finish_state; + + /* Address/len lists */ + clearp_dde(ddl_in); + clearp_dde(ddl_out); + + /* FC, CRC, HistLen, Table 6-6 */ + if (resuming) { + /* Resuming a partially decompressed input. + * The key to resume is supplying the 32KB + * dictionary (history) to NX, which is basically + * the last 32KB of output produced. + */ + fc = GZIP_FC_DECOMPRESS_RESUME; + + cmdp->cpb.in_crc = cmdp->cpb.out_crc; + cmdp->cpb.in_adler = cmdp->cpb.out_adler; + + /* Round up the history size to quadword. Section 2.10 */ + history_len = (history_len + 15) / 16; + putnn(cmdp->cpb, in_histlen, history_len); + history_len = history_len * 16; /* bytes */ + + if (history_len > 0) { + /* Chain in the history buffer to the DDE list */ + if (cur_out >= history_len) { + nx_append_dde(ddl_in, fifo_out + + (cur_out - history_len), + history_len); + } else { + nx_append_dde(ddl_in, fifo_out + + ((fifo_out_len + cur_out) + - history_len), + history_len - cur_out); + /* Up to 32KB history wraps around fifo_out */ + nx_append_dde(ddl_in, fifo_out, cur_out); + } + + } + } else { + /* First decompress job */ + fc = GZIP_FC_DECOMPRESS; + + history_len = 0; + /* Writing 0 clears out subc as well */ + cmdp->cpb.in_histlen = 0; + total_out = 0; + + put32(cmdp->cpb, in_crc, INIT_CRC); + put32(cmdp->cpb, in_adler, INIT_ADLER); + put32(cmdp->cpb, out_crc, INIT_CRC); + put32(cmdp->cpb, out_adler, INIT_ADLER); + + /* Assuming 10% compression ratio initially; use the + * most recently measured compression ratio as a + * heuristic to estimate the input and output + * sizes. If we give too much input, the target buffer + * overflows and NX cycles are wasted, and then we + * must retry with smaller input size. 1000 is 100%. 
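		 * E.g. with last_comp_ratio = 100 (10%) and target_max of
		 * 1 MiB, source_sz_estimate below works out to
		 * 1048576 * 100 * 3 / 4000 = 78643 bytes of compressed
		 * input to roughly 3/4-fill the target.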
+ */ + last_comp_ratio = 100UL; + } + cmdp->crb.gzip_fc = 0; + putnn(cmdp->crb, gzip_fc, fc); + + /* + * NX source buffers + */ + first_used = fifo_used_first_bytes(cur_in, used_in, fifo_in_len); + last_used = fifo_used_last_bytes(cur_in, used_in, fifo_in_len); + + if (first_used > 0) + nx_append_dde(ddl_in, fifo_in + cur_in, first_used); + + if (last_used > 0) + nx_append_dde(ddl_in, fifo_in, last_used); + + /* + * NX target buffers + */ + first_free = fifo_free_first_bytes(cur_out, used_out, fifo_out_len); + last_free = fifo_free_last_bytes(cur_out, used_out, fifo_out_len); + + /* Reduce output free space amount not to overwrite the history */ + int target_max = NX_MAX(0, fifo_free_bytes(used_out, fifo_out_len) + - (1<<16)); + + NXPRT(fprintf(stderr, "target_max %d (0x%x)\n", target_max, + target_max)); + + first_free = NX_MIN(target_max, first_free); + if (first_free > 0) { + first_offset = fifo_free_first_offset(cur_out, used_out); + nx_append_dde(ddl_out, fifo_out + first_offset, first_free); + } + + if (last_free > 0) { + last_free = NX_MIN(target_max - first_free, last_free); + if (last_free > 0) { + last_offset = fifo_free_last_offset(cur_out, used_out, + fifo_out_len); + nx_append_dde(ddl_out, fifo_out + last_offset, + last_free); + } + } + + /* Target buffer size is used to limit the source data size + * based on previous measurements of compression ratio. + */ + + /* source_sz includes history */ + source_sz = getp32(ddl_in, ddebc); + assert(source_sz > history_len); + source_sz = source_sz - history_len; + + /* Estimating how much source is needed to 3/4 fill a + * target_max size target buffer. If we overshoot, then NX + * must repeat the job with smaller input and we waste + * bandwidth. If we undershoot then we use more NX calls than + * necessary. + */ + + source_sz_estimate = ((uint64_t)target_max * last_comp_ratio * 3UL) + / 4000; + + if (source_sz_estimate < source_sz) { + /* Target might be small, therefore limiting the + * source data. + */ + source_sz = source_sz_estimate; + target_sz_estimate = target_max; + } else { + /* Source file might be small, therefore limiting target + * touch pages to a smaller value to save processor cycles. + */ + target_sz_estimate = ((uint64_t)source_sz * 1000UL) + / (last_comp_ratio + 1); + target_sz_estimate = NX_MIN(2 * target_sz_estimate, + target_max); + } + + source_sz = source_sz + history_len; + + /* Some NX condition codes require submitting the NX job again. + * Kernel doesn't handle NX page faults. Expects user code to + * touch pages. + */ + pgfault_retries = NX_MAX_FAULTS; + +restart_nx: + + putp32(ddl_in, ddebc, source_sz); + + /* Fault in pages */ + nxu_touch_pages(cmdp, sizeof(struct nx_gzip_crb_cpb_t), page_sz, 1); + nx_touch_pages_dde(ddl_in, 0, page_sz, 0); + nx_touch_pages_dde(ddl_out, target_sz_estimate, page_sz, 1); + + /* Send job to NX */ + cc = nx_submit_job(ddl_in, ddl_out, cmdp, devhandle); + + switch (cc) { + + case ERR_NX_TRANSLATION: + + /* We touched the pages ahead of time. In the most common case + * we shouldn't be here. But may be some pages were paged out. + * Kernel should have placed the faulting address to fsaddr. 
+ */ + NXPRT(fprintf(stderr, "ERR_NX_TRANSLATION %p\n", + (void *)cmdp->crb.csb.fsaddr)); + + if (pgfault_retries == NX_MAX_FAULTS) { + /* Try once with exact number of pages */ + --pgfault_retries; + goto restart_nx; + } else if (pgfault_retries > 0) { + /* If still faulting try fewer input pages + * assuming memory outage + */ + if (source_sz > page_sz) + source_sz = NX_MAX(source_sz / 2, page_sz); + --pgfault_retries; + goto restart_nx; + } else { + fprintf(stderr, "cannot make progress; too many "); + fprintf(stderr, "page fault retries cc= %d\n", cc); + rc = -1; + goto err5; + } + + case ERR_NX_DATA_LENGTH: + + NXPRT(fprintf(stderr, "ERR_NX_DATA_LENGTH; ")); + NXPRT(fprintf(stderr, "stream may have trailing data\n")); + + /* Not an error in the most common case; it just says + * there is trailing data that we must examine. + * + * CC=3 CE(1)=0 CE(0)=1 indicates partial completion + * Fig.6-7 and Table 6-8. + */ + nx_ce = get_csb_ce_ms3b(cmdp->crb.csb); + + if (!csb_ce_termination(nx_ce) && + csb_ce_partial_completion(nx_ce)) { + /* Check CPB for more information + * spbc and tpbc are valid + */ + sfbt = getnn(cmdp->cpb, out_sfbt); /* Table 6-4 */ + subc = getnn(cmdp->cpb, out_subc); /* Table 6-4 */ + spbc = get32(cmdp->cpb, out_spbc_decomp); + tpbc = get32(cmdp->crb.csb, tpbc); + assert(target_max >= tpbc); + + goto ok_cc3; /* not an error */ + } else { + /* History length error when CE(1)=1 CE(0)=0. */ + rc = -1; + fprintf(stderr, "history length error cc= %d\n", cc); + goto err5; + } + + case ERR_NX_TARGET_SPACE: + + /* Target buffer not large enough; retry smaller input + * data; give at least 1 byte. SPBC/TPBC are not valid. + */ + assert(source_sz > history_len); + source_sz = ((source_sz - history_len + 2) / 2) + history_len; + NXPRT(fprintf(stderr, "ERR_NX_TARGET_SPACE; retry with ")); + NXPRT(fprintf(stderr, "smaller input data src %d hist %d\n", + source_sz, history_len)); + goto restart_nx; + + case ERR_NX_OK: + + /* This should not happen for gzip formatted data; + * we need trailing crc and isize + */ + fprintf(stderr, "ERR_NX_OK\n"); + spbc = get32(cmdp->cpb, out_spbc_decomp); + tpbc = get32(cmdp->crb.csb, tpbc); + assert(target_max >= tpbc); + assert(spbc >= history_len); + source_sz = spbc - history_len; + goto offsets_state; + + default: + fprintf(stderr, "error: cc= %d\n", cc); + rc = -1; + goto err5; + } + +ok_cc3: + + NXPRT(fprintf(stderr, "cc3: sfbt: %x\n", sfbt)); + + assert(spbc > history_len); + source_sz = spbc - history_len; + + /* Table 6-4: Source Final Block Type (SFBT) describes the + * last processed deflate block and clues the software how to + * resume the next job. SUBC indicates how many input bits NX + * consumed but did not process. SPBC indicates how many + * bytes of source were given to the accelerator including + * history bytes. + */ + + switch (sfbt) { + int dhtlen; + + case 0x0: /* Deflate final EOB received */ + + /* Calculating the checksum start position. */ + + source_sz = source_sz - subc / 8; + is_final = 1; + break; + + /* Resume decompression cases are below. Basically + * indicates where NX has suspended and how to resume + * the input stream. + */ + + case 0x8: /* Within a literal block; use rembytecount */ + case 0x9: /* Within a literal block; use rembytecount; bfinal=1 */ + + /* Supply the partially processed source byte again */ + source_sz = source_sz - ((subc + 7) / 8); + + /* SUBC LS 3bits: number of bits in the first source byte need + * to be processed. 
+ * 000 means all 8 bits; Table 6-3 + * Clear subc, histlen, sfbt, rembytecnt, dhtlen + */ + cmdp->cpb.in_subc = 0; + cmdp->cpb.in_sfbt = 0; + putnn(cmdp->cpb, in_subc, subc % 8); + putnn(cmdp->cpb, in_sfbt, sfbt); + putnn(cmdp->cpb, in_rembytecnt, getnn(cmdp->cpb, + out_rembytecnt)); + break; + + case 0xA: /* Within a FH block; */ + case 0xB: /* Within a FH block; bfinal=1 */ + + source_sz = source_sz - ((subc + 7) / 8); + + /* Clear subc, histlen, sfbt, rembytecnt, dhtlen */ + cmdp->cpb.in_subc = 0; + cmdp->cpb.in_sfbt = 0; + putnn(cmdp->cpb, in_subc, subc % 8); + putnn(cmdp->cpb, in_sfbt, sfbt); + break; + + case 0xC: /* Within a DH block; */ + case 0xD: /* Within a DH block; bfinal=1 */ + + source_sz = source_sz - ((subc + 7) / 8); + + /* Clear subc, histlen, sfbt, rembytecnt, dhtlen */ + cmdp->cpb.in_subc = 0; + cmdp->cpb.in_sfbt = 0; + putnn(cmdp->cpb, in_subc, subc % 8); + putnn(cmdp->cpb, in_sfbt, sfbt); + + dhtlen = getnn(cmdp->cpb, out_dhtlen); + putnn(cmdp->cpb, in_dhtlen, dhtlen); + assert(dhtlen >= 42); + + /* Round up to a qword */ + dhtlen = (dhtlen + 127) / 128; + + while (dhtlen > 0) { /* Copy dht from cpb.out to cpb.in */ + --dhtlen; + cmdp->cpb.in_dht[dhtlen] = cmdp->cpb.out_dht[dhtlen]; + } + break; + + case 0xE: /* Within a block header; bfinal=0; */ + /* Also given if source data exactly ends (SUBC=0) with + * EOB code with BFINAL=0. Means the next byte will + * contain a block header. + */ + case 0xF: /* within a block header with BFINAL=1. */ + + source_sz = source_sz - ((subc + 7) / 8); + + /* Clear subc, histlen, sfbt, rembytecnt, dhtlen */ + cmdp->cpb.in_subc = 0; + cmdp->cpb.in_sfbt = 0; + putnn(cmdp->cpb, in_subc, subc % 8); + putnn(cmdp->cpb, in_sfbt, sfbt); + + /* Engine did not process any data */ + if (is_eof && (source_sz == 0)) + is_final = 1; + } + +offsets_state: + + /* Adjust the source and target buffer offsets and lengths */ + + NXPRT(fprintf(stderr, "offsets_state:\n")); + + /* Delete input data from fifo_in */ + used_in = used_in - source_sz; + cur_in = (cur_in + source_sz) % fifo_in_len; + input_file_offset = input_file_offset + source_sz; + + /* Add output data to fifo_out */ + used_out = used_out + tpbc; + + assert(used_out <= fifo_out_len); + + total_out = total_out + tpbc; + + /* Deflate history is 32KB max. No need to supply more + * than 32KB on a resume. + */ + history_len = (total_out > window_max) ? window_max : total_out; + + /* To estimate expected expansion in the next NX job; 500 means 50%. + * Deflate best case is around 1 to 1000. + */ + last_comp_ratio = (1000UL * ((uint64_t)source_sz + 1)) + / ((uint64_t)tpbc + 1); + last_comp_ratio = NX_MAX(NX_MIN(1000UL, last_comp_ratio), 1); + NXPRT(fprintf(stderr, "comp_ratio %ld source_sz %d spbc %d tpbc %d\n", + last_comp_ratio, source_sz, spbc, tpbc)); + + resuming = 1; + +finish_state: + + NXPRT(fprintf(stderr, "finish_state:\n")); + + if (is_final) { + if (used_out) + goto write_state; /* More data to write out */ + else if (used_in < 8) { + /* Need at least 8 more bytes containing gzip crc + * and isize. 
+ */ + rc = -1; + goto err4; + } else { + /* Compare checksums and exit */ + int i; + unsigned char tail[8]; + uint32_t cksum, isize; + + for (i = 0; i < 8; i++) + tail[i] = fifo_in[(cur_in + i) % fifo_in_len]; + fprintf(stderr, "computed checksum %08x isize %08x\n", + cmdp->cpb.out_crc, (uint32_t) (total_out + % (1ULL<<32))); + cksum = ((uint32_t) tail[0] | (uint32_t) tail[1]<<8 + | (uint32_t) tail[2]<<16 + | (uint32_t) tail[3]<<24); + isize = ((uint32_t) tail[4] | (uint32_t) tail[5]<<8 + | (uint32_t) tail[6]<<16 + | (uint32_t) tail[7]<<24); + fprintf(stderr, "stored checksum %08x isize %08x\n", + cksum, isize); + + if (cksum == cmdp->cpb.out_crc && isize == (uint32_t) + (total_out % (1ULL<<32))) { + rc = 0; goto ok1; + } else { + rc = -1; goto err4; + } + } + } else + goto read_state; + + return -1; + +err1: + fprintf(stderr, "error: not a gzip file, expect %x, read %x\n", + expect, c); + return -1; + +err2: + fprintf(stderr, "error: the FLG byte is wrong or not being handled\n"); + return -1; + +err3: + fprintf(stderr, "error: gzip header\n"); + return -1; + +err4: + fprintf(stderr, "error: checksum missing or mismatch\n"); + +err5: +ok1: + fprintf(stderr, "decomp is complete: fclose\n"); + fclose(outf); + + return rc; +} + + +int main(int argc, char **argv) +{ + int rc; + struct sigaction act; + void *handle; + + nx_dbg = 0; + nx_gzip_log = NULL; + act.sa_handler = 0; + act.sa_sigaction = nxu_sigsegv_handler; + act.sa_flags = SA_SIGINFO; + act.sa_restorer = 0; + sigemptyset(&act.sa_mask); + sigaction(SIGSEGV, &act, NULL); + + handle = nx_function_begin(NX_FUNC_COMP_GZIP, 0); + if (!handle) { + fprintf(stderr, "Unable to init NX, errno %d\n", errno); + exit(-1); + } + + rc = decompress_file(argc, argv, handle); + + nx_function_end(handle); + + return rc; +} diff --git a/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh b/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh index 7cc7256ba1c7..c7b46c5fd7b3 100755 --- a/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh +++ b/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh @@ -26,6 +26,7 @@ function test_sizes echo "Testing $size ($n) ..." dd if=/dev/urandom of=$fname bs=$size count=1 ./gzfht_test $fname + ./gunz_test ${fname}.nx.gz done } -- cgit v1.2.3-59-g8ed1b From 722c1963aba5a86778f7d044116e10e1c73e87a8 Mon Sep 17 00:00:00 2001 From: Raphael Moreira Zinsly Date: Mon, 20 Apr 2020 17:55:38 -0300 Subject: selftests/powerpc: Add README for GZIP engine tests Include a README file with the instructions to use the testcases at selftests/powerpc/nx-gzip. 
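
Before running the tests it is worth confirming that the device node is
actually reachable. The snippet below is an illustrative aside, not part of
this patch set: a minimal user-space probe using only POSIX open(2), with the
device path taken from the README added by this patch.

/*
 * Hypothetical pre-flight check, not part of the selftests: verify
 * that /dev/crypto/nx-gzip (the node described in the README) exists
 * and is readable/writable by the current user.  Assumes only POSIX.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *dev = "/dev/crypto/nx-gzip";
	int fd = open(dev, O_RDWR);

	if (fd < 0) {
		/* Typical failure: EACCES until the udev rule or chmod is applied */
		fprintf(stderr, "cannot open %s: %s\n", dev, strerror(errno));
		return 1;
	}
	printf("%s is accessible\n", dev);
	close(fd);
	return 0;
}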
Signed-off-by: Bulent Abali
Signed-off-by: Raphael Moreira Zinsly
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200420205538.25181-6-rzinsly@linux.ibm.com
---
 .../selftests/powerpc/nx-gzip/99-nx-gzip.rules |  1 +
 tools/testing/selftests/powerpc/nx-gzip/README | 45 ++++++++++++++++++++++
 2 files changed, 46 insertions(+)
 create mode 100644 tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules
 create mode 100644 tools/testing/selftests/powerpc/nx-gzip/README

diff --git a/tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules b/tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules
new file mode 100644
index 000000000000..5a7118495cb3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules
@@ -0,0 +1 @@
+SUBSYSTEM=="nxgzip", KERNEL=="nx-gzip", MODE="0666"
diff --git a/tools/testing/selftests/powerpc/nx-gzip/README b/tools/testing/selftests/powerpc/nx-gzip/README
new file mode 100644
index 000000000000..9809dbaa1905
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/README
@@ -0,0 +1,45 @@
+Test the nx-gzip function:
+=========================
+
+Verify that the following device exists:
+  /dev/crypto/nx-gzip
+If you get a permission error, run as sudo or set the device permissions:
+  sudo chmod go+rw /dev/crypto/nx-gzip
+However, chmod may not survive across boots.  You may create a udev file such
+as:
+  /etc/udev/rules.d/99-nx-gzip.rules
+
+
+To manually build and run:
+$ gcc -O3 -I./include -o gzfht_test gzfht_test.c gzip_vas.c
+$ gcc -O3 -I./include -o gunz_test gunz_test.c gzip_vas.c
+
+
+Compress any file using Fixed Huffman mode.  Output will have a .nx.gz suffix:
+$ ./gzfht_test gzip_vas.c
+file gzip_vas.c read, 6413 bytes
+compressed 6413 to 3124 bytes total, crc32 checksum = abd15e8a
+
+
+Uncompress the previous output.  Output will have a .nx.gunzip suffix:
+$ ./gunz_test gzip_vas.c.nx.gz
+gzHeader FLG 0
+00 00 00 00 04 03
+gzHeader MTIME, XFL, OS ignored
+computed checksum abd15e8a isize 0000190d
+stored checksum abd15e8a isize 0000190d
+decomp is complete: fclose
+
+
+Compare the two files:
+$ sha1sum gzip_vas.c.nx.gz.nx.gunzip gzip_vas.c
+bf43e3c0c3651f5f22b6f9784cd9b1eeab4120b6  gzip_vas.c.nx.gz.nx.gunzip
+bf43e3c0c3651f5f22b6f9784cd9b1eeab4120b6  gzip_vas.c
+
+
+Note that the code here is intended for testing the nx-gzip hardware function.
+It is not intended for demonstrating performance or compression ratio.
+Being simplistic, these selftests allocate the entire set of source and
+target pages in memory, so they need enough free memory to work.
+For more information and source code consider using:
+https://github.com/libnxz/power-gzip
-- cgit v1.2.3-59-g8ed1b


From 45591da765885f7320a111d290b3a28a23eed359 Mon Sep 17 00:00:00 2001
From: Stephen Rothwell
Date: Wed, 22 Apr 2020 15:41:29 +1000
Subject: powerpc/vas: Include linux/types.h in uapi/asm/vas-api.h

allyesconfig fails with:

  ./usr/include/asm/vas-api.h:15:2: error: unknown type name '__u32'
     15 |  __u32 version;
        |  ^~~~~
  ./usr/include/asm/vas-api.h:16:2: error: unknown type name '__s16'
     16 |  __s16 vas_id; /* specific instance of vas or -1 for default */
        |  ^~~~~
  ./usr/include/asm/vas-api.h:17:2: error: unknown type name '__u16'
     17 |  __u16 reserved1;
        |  ^~~~~
  ./usr/include/asm/vas-api.h:18:2: error: unknown type name '__u64'
     18 |  __u64 flags; /* Future use */
        |  ^~~~~
  ./usr/include/asm/vas-api.h:19:2: error: unknown type name '__u64'
     19 |  __u64 reserved2[6];
        |  ^~~~~

uapi headers should be self-contained, so add an include of linux/types.h.
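
The failure is easy to reproduce in isolation. As a rough sketch (not part of
the patch; the struct tag vas_tx_win_open_attr is assumed from the
VAS_TX_WIN_OPEN ioctl API, and the field names are those shown in the error
output above), a translation unit that includes only the installed uapi
header should compile cleanly once the fix is in place:

/* Illustrative self-containment check, assuming the struct tag
 * vas_tx_win_open_attr used by the VAS_TX_WIN_OPEN ioctl; build with
 * something like: gcc -I usr/include -c vas-api-check.c
 * after a "make headers_install".
 */
#include <asm/vas-api.h>	/* deliberately the only include */

int check_vas_api_is_self_contained(void)
{
	struct vas_tx_win_open_attr attr = {
		.version = 1,
		.vas_id = -1,	/* default vas instance */
	};

	return (int)attr.version;
}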
Fixes: 45f25a79fe50 ("powerpc/vas: Define VAS_TX_WIN_OPEN ioctl API")
Signed-off-by: Stephen Rothwell
Acked-by: Haren Myneni
[mpe: Flesh out change log from linux-next error report]
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200422154129.11f988fd@canb.auug.org.au
---
 arch/powerpc/include/uapi/asm/vas-api.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/powerpc/include/uapi/asm/vas-api.h b/arch/powerpc/include/uapi/asm/vas-api.h
index fe95d67e3bab..ebd4b2424785 100644
--- a/arch/powerpc/include/uapi/asm/vas-api.h
+++ b/arch/powerpc/include/uapi/asm/vas-api.h
@@ -6,6 +6,8 @@
 #ifndef _UAPI_MISC_VAS_H
 #define _UAPI_MISC_VAS_H

+#include <linux/types.h>
+
 #include <asm/ioctl.h>

 #define VAS_MAGIC 'v'
-- cgit v1.2.3-59-g8ed1b


From e4a884cc28fa3f5d8b81de46998ffe29b4ad169e Mon Sep 17 00:00:00 2001
From: "Gautham R. Shenoy"
Date: Tue, 7 Apr 2020 14:17:39 +0530
Subject: powerpc: Move idle_loop_prolog()/epilog() functions to header file

Currently, prior to entering an idle state on a Linux guest, the
pseries cpuidle driver implements the idle_loop_prolog() and
idle_loop_epilog() functions, which ensure that idle_purr is correctly
computed and that the hypervisor is informed that the CPU cycles have
been donated.

These prolog and epilog functions are also required in the default
idle call, i.e. pseries_lpar_idle(). Hence move these accessor
functions to a common header file and call them from
pseries_lpar_idle(). Since the existing header files such as
asm/processor.h have enough clutter, create a new header file
asm/idle.h. Finally, rename idle_loop_prolog() and idle_loop_epilog()
to pseries_idle_prolog() and pseries_idle_epilog(), as they are only
relevant on pseries guests.

Signed-off-by: Gautham R. Shenoy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/1586249263-14048-2-git-send-email-ego@linux.vnet.ibm.com
---
 arch/powerpc/include/asm/idle.h        | 31 +++++++++++++++++++++++++++++
 arch/powerpc/platforms/pseries/setup.c |  7 +++++--
 drivers/cpuidle/cpuidle-pseries.c      | 36 +++++++---------------------------
 3 files changed, 43 insertions(+), 31 deletions(-)
 create mode 100644 arch/powerpc/include/asm/idle.h

diff --git a/arch/powerpc/include/asm/idle.h b/arch/powerpc/include/asm/idle.h
new file mode 100644
index 000000000000..32064a4c0dd7
--- /dev/null
+++ b/arch/powerpc/include/asm/idle.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_IDLE_H
+#define _ASM_POWERPC_IDLE_H
+#include <asm/runlatch.h>
+#include <asm/paca.h>
+
+#ifdef CONFIG_PPC_PSERIES
+static inline void pseries_idle_prolog(unsigned long *in_purr)
+{
+	ppc64_runlatch_off();
+	*in_purr = mfspr(SPRN_PURR);
+	/*
+	 * Indicate to the HV that we are idle. Now would be
+	 * a good time to find other work to dispatch.
+ */ + get_lppaca()->idle = 1; +} + +static inline void pseries_idle_epilog(unsigned long in_purr) +{ + u64 wait_cycles; + + wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles); + wait_cycles += mfspr(SPRN_PURR) - in_purr; + get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles); + get_lppaca()->idle = 0; + + ppc64_runlatch_on(); +} +#endif /* CONFIG_PPC_PSERIES */ +#endif diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 0c8421dd01ab..2f53e6b031a7 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -68,6 +68,7 @@ #include #include #include +#include #include #include @@ -319,6 +320,8 @@ machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache); static void pseries_lpar_idle(void) { + unsigned long in_purr; + /* * Default handler to go into low thread priority and possibly * low power mode by ceding processor to hypervisor @@ -328,7 +331,7 @@ static void pseries_lpar_idle(void) return; /* Indicate to hypervisor that we are idle. */ - get_lppaca()->idle = 1; + pseries_idle_prolog(&in_purr); /* * Yield the processor to the hypervisor. We return if @@ -339,7 +342,7 @@ static void pseries_lpar_idle(void) */ cede_processor(); - get_lppaca()->idle = 0; + pseries_idle_epilog(in_purr); } /* diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c index 74c247972bb3..46d5e05fcf97 100644 --- a/drivers/cpuidle/cpuidle-pseries.c +++ b/drivers/cpuidle/cpuidle-pseries.c @@ -19,6 +19,7 @@ #include #include #include +#include #include struct cpuidle_driver pseries_idle_driver = { @@ -31,29 +32,6 @@ static struct cpuidle_state *cpuidle_state_table __read_mostly; static u64 snooze_timeout __read_mostly; static bool snooze_timeout_en __read_mostly; -static inline void idle_loop_prolog(unsigned long *in_purr) -{ - ppc64_runlatch_off(); - *in_purr = mfspr(SPRN_PURR); - /* - * Indicate to the HV that we are idle. Now would be - * a good time to find other work to dispatch. - */ - get_lppaca()->idle = 1; -} - -static inline void idle_loop_epilog(unsigned long in_purr) -{ - u64 wait_cycles; - - wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles); - wait_cycles += mfspr(SPRN_PURR) - in_purr; - get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles); - get_lppaca()->idle = 0; - - ppc64_runlatch_on(); -} - static int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) @@ -63,7 +41,7 @@ static int snooze_loop(struct cpuidle_device *dev, set_thread_flag(TIF_POLLING_NRFLAG); - idle_loop_prolog(&in_purr); + pseries_idle_prolog(&in_purr); local_irq_enable(); snooze_exit_time = get_tb() + snooze_timeout; @@ -87,7 +65,7 @@ static int snooze_loop(struct cpuidle_device *dev, local_irq_disable(); - idle_loop_epilog(in_purr); + pseries_idle_epilog(in_purr); return index; } @@ -115,7 +93,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev, { unsigned long in_purr; - idle_loop_prolog(&in_purr); + pseries_idle_prolog(&in_purr); get_lppaca()->donate_dedicated_cpu = 1; HMT_medium(); @@ -124,7 +102,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev, local_irq_disable(); get_lppaca()->donate_dedicated_cpu = 0; - idle_loop_epilog(in_purr); + pseries_idle_epilog(in_purr); return index; } @@ -135,7 +113,7 @@ static int shared_cede_loop(struct cpuidle_device *dev, { unsigned long in_purr; - idle_loop_prolog(&in_purr); + pseries_idle_prolog(&in_purr); /* * Yield the processor to the hypervisor. 
We return if @@ -147,7 +125,7 @@ static int shared_cede_loop(struct cpuidle_device *dev, check_and_cede_processor(); local_irq_disable(); - idle_loop_epilog(in_purr); + pseries_idle_epilog(in_purr); return index; } -- cgit v1.2.3-59-g8ed1b From c4019198cfa81224d32846915cd401e981f81b81 Mon Sep 17 00:00:00 2001 From: "Gautham R. Shenoy" Date: Tue, 7 Apr 2020 14:17:40 +0530 Subject: powerpc/idle: Store PURR snapshot in a per-cpu global variable Currently when CPU goes idle, we take a snapshot of PURR via pseries_idle_prolog() which is used at the CPU idle exit to compute the idle PURR cycles via the function pseries_idle_epilog(). Thus, the value of idle PURR cycle thus read before pseries_idle_prolog() and after pseries_idle_epilog() is always correct. However, if we were to read the idle PURR cycles from an interrupt context between pseries_idle_prolog() and pseries_idle_epilog() (this will be done in a future patch), then, the value of the idle PURR thus read will not include the cycles spent in the most recent idle period. Thus, in that interrupt context, we will need access to the snapshot of the PURR before going idle, in order to compute the idle PURR cycles for the latest idle duration. In this patch, we save the snapshot of PURR in pseries_idle_prolog() in a per-cpu variable, instead of on the stack, so that it can be accessed from an interrupt context. Signed-off-by: Gautham R. Shenoy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1586249263-14048-3-git-send-email-ego@linux.vnet.ibm.com --- arch/powerpc/include/asm/idle.h | 31 ++++++++++++++++++++++--------- arch/powerpc/platforms/pseries/setup.c | 7 +++---- drivers/cpuidle/cpuidle-pseries.c | 15 ++++++--------- 3 files changed, 31 insertions(+), 22 deletions(-) diff --git a/arch/powerpc/include/asm/idle.h b/arch/powerpc/include/asm/idle.h index 32064a4c0dd7..b90d75aa1f9e 100644 --- a/arch/powerpc/include/asm/idle.h +++ b/arch/powerpc/include/asm/idle.h @@ -5,10 +5,27 @@ #include #ifdef CONFIG_PPC_PSERIES -static inline void pseries_idle_prolog(unsigned long *in_purr) +DECLARE_PER_CPU(u64, idle_entry_purr_snap); + +static inline void snapshot_purr_idle_entry(void) +{ + *this_cpu_ptr(&idle_entry_purr_snap) = mfspr(SPRN_PURR); +} + +static inline void update_idle_purr_accounting(void) +{ + u64 wait_cycles; + u64 in_purr = *this_cpu_ptr(&idle_entry_purr_snap); + + wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles); + wait_cycles += mfspr(SPRN_PURR) - in_purr; + get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles); +} + +static inline void pseries_idle_prolog(void) { ppc64_runlatch_off(); - *in_purr = mfspr(SPRN_PURR); + snapshot_purr_idle_entry(); /* * Indicate to the HV that we are idle. Now would be * a good time to find other work to dispatch. 
@@ -16,16 +33,12 @@ static inline void pseries_idle_prolog(unsigned long *in_purr) get_lppaca()->idle = 1; } -static inline void pseries_idle_epilog(unsigned long in_purr) +static inline void pseries_idle_epilog(void) { - u64 wait_cycles; - - wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles); - wait_cycles += mfspr(SPRN_PURR) - in_purr; - get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles); + update_idle_purr_accounting(); get_lppaca()->idle = 0; - ppc64_runlatch_on(); } + #endif /* CONFIG_PPC_PSERIES */ #endif diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 2f53e6b031a7..4905c965e111 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -318,10 +318,9 @@ static int alloc_dispatch_log_kmem_cache(void) } machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache); +DEFINE_PER_CPU(u64, idle_entry_purr_snap); static void pseries_lpar_idle(void) { - unsigned long in_purr; - /* * Default handler to go into low thread priority and possibly * low power mode by ceding processor to hypervisor @@ -331,7 +330,7 @@ static void pseries_lpar_idle(void) return; /* Indicate to hypervisor that we are idle. */ - pseries_idle_prolog(&in_purr); + pseries_idle_prolog(); /* * Yield the processor to the hypervisor. We return if @@ -342,7 +341,7 @@ static void pseries_lpar_idle(void) */ cede_processor(); - pseries_idle_epilog(in_purr); + pseries_idle_epilog(); } /* diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c index 46d5e05fcf97..6513ef2af66a 100644 --- a/drivers/cpuidle/cpuidle-pseries.c +++ b/drivers/cpuidle/cpuidle-pseries.c @@ -36,12 +36,11 @@ static int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - unsigned long in_purr; u64 snooze_exit_time; set_thread_flag(TIF_POLLING_NRFLAG); - pseries_idle_prolog(&in_purr); + pseries_idle_prolog(); local_irq_enable(); snooze_exit_time = get_tb() + snooze_timeout; @@ -65,7 +64,7 @@ static int snooze_loop(struct cpuidle_device *dev, local_irq_disable(); - pseries_idle_epilog(in_purr); + pseries_idle_epilog(); return index; } @@ -91,9 +90,8 @@ static int dedicated_cede_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - unsigned long in_purr; - pseries_idle_prolog(&in_purr); + pseries_idle_prolog(); get_lppaca()->donate_dedicated_cpu = 1; HMT_medium(); @@ -102,7 +100,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev, local_irq_disable(); get_lppaca()->donate_dedicated_cpu = 0; - pseries_idle_epilog(in_purr); + pseries_idle_epilog(); return index; } @@ -111,9 +109,8 @@ static int shared_cede_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - unsigned long in_purr; - pseries_idle_prolog(&in_purr); + pseries_idle_prolog(); /* * Yield the processor to the hypervisor. We return if @@ -125,7 +122,7 @@ static int shared_cede_loop(struct cpuidle_device *dev, check_and_cede_processor(); local_irq_disable(); - pseries_idle_epilog(in_purr); + pseries_idle_epilog(); return index; } -- cgit v1.2.3-59-g8ed1b From dc8afce5f45b099e3ea52a16b2f90e92f90f3af0 Mon Sep 17 00:00:00 2001 From: "Gautham R. Shenoy" Date: Tue, 7 Apr 2020 14:17:41 +0530 Subject: powerpc/pseries: Account for SPURR ticks on idle CPUs On Pseries LPARs, to calculate utilization, we need to know the [S]PURR ticks when the CPUs were busy or idle. Via pseries_idle_prolog(), pseries_idle_epilog(), we track the idle PURR ticks in the VPA variable "wait_state_cycles". 
This patch extends the support to account for the idle SPURR ticks.

Signed-off-by: Gautham R. Shenoy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/1586249263-14048-4-git-send-email-ego@linux.vnet.ibm.com
---
 arch/powerpc/include/asm/idle.h        | 17 +++++++++++++++++
 arch/powerpc/platforms/pseries/setup.c |  2 ++
 2 files changed, 19 insertions(+)

diff --git a/arch/powerpc/include/asm/idle.h b/arch/powerpc/include/asm/idle.h
index b90d75aa1f9e..0efb25071d87 100644
--- a/arch/powerpc/include/asm/idle.h
+++ b/arch/powerpc/include/asm/idle.h
@@ -5,13 +5,20 @@
 #include <asm/paca.h>

 #ifdef CONFIG_PPC_PSERIES
+DECLARE_PER_CPU(u64, idle_spurr_cycles);
 DECLARE_PER_CPU(u64, idle_entry_purr_snap);
+DECLARE_PER_CPU(u64, idle_entry_spurr_snap);

 static inline void snapshot_purr_idle_entry(void)
 {
 	*this_cpu_ptr(&idle_entry_purr_snap) = mfspr(SPRN_PURR);
 }

+static inline void snapshot_spurr_idle_entry(void)
+{
+	*this_cpu_ptr(&idle_entry_spurr_snap) = mfspr(SPRN_SPURR);
+}
+
 static inline void update_idle_purr_accounting(void)
 {
 	u64 wait_cycles;
@@ -22,10 +29,19 @@ static inline void update_idle_purr_accounting(void)
 	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
 }

+static inline void update_idle_spurr_accounting(void)
+{
+	u64 *idle_spurr_cycles_ptr = this_cpu_ptr(&idle_spurr_cycles);
+	u64 in_spurr = *this_cpu_ptr(&idle_entry_spurr_snap);
+
+	*idle_spurr_cycles_ptr += mfspr(SPRN_SPURR) - in_spurr;
+}
+
 static inline void pseries_idle_prolog(void)
 {
 	ppc64_runlatch_off();
 	snapshot_purr_idle_entry();
+	snapshot_spurr_idle_entry();
 	/*
 	 * Indicate to the HV that we are idle. Now would be
 	 * a good time to find other work to dispatch.
@@ -36,6 +52,7 @@ static inline void pseries_idle_prolog(void)
 static inline void pseries_idle_epilog(void)
 {
 	update_idle_purr_accounting();
+	update_idle_spurr_accounting();
 	get_lppaca()->idle = 0;
 	ppc64_runlatch_on();
 }
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 4905c965e111..1b55e804927d 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -318,7 +318,9 @@ static int alloc_dispatch_log_kmem_cache(void)
 }
 machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);

+DEFINE_PER_CPU(u64, idle_spurr_cycles);
 DEFINE_PER_CPU(u64, idle_entry_purr_snap);
+DEFINE_PER_CPU(u64, idle_entry_spurr_snap);
 static void pseries_lpar_idle(void)
 {
 	/*
-- cgit v1.2.3-59-g8ed1b


From 6909f179ca7a73f243dca7c829facca1cc1d4ff5 Mon Sep 17 00:00:00 2001
From: "Gautham R. Shenoy"
Date: Tue, 7 Apr 2020 14:17:42 +0530
Subject: powerpc/sysfs: Show idle_purr and idle_spurr for every CPU

On Pseries LPARs, to calculate utilization, we need to know the
[S]PURR ticks when the CPUs were busy or idle.

The total PURR and SPURR ticks are already exposed via the per-cpu
sysfs files "purr" and "spurr". This patch adds support for exposing
the idle PURR and SPURR ticks via new per-cpu sysfs files named
"idle_purr" and "idle_spurr".

This patch also adds helper functions to accurately read the values of
idle_purr and idle_spurr, especially from an interrupt context that
occurs between pseries_idle_prolog() and pseries_idle_epilog(). This
will ensure that the idle purr/spurr values corresponding to the
latest idle period are accounted for before these values are read.

Signed-off-by: Gautham R.
Shenoy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1586249263-14048-5-git-send-email-ego@linux.vnet.ibm.com --- arch/powerpc/include/asm/idle.h | 32 ++++++++++++++++ arch/powerpc/kernel/sysfs.c | 82 +++++++++++++++++++++++++++++++++++++++-- 2 files changed, 111 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/idle.h b/arch/powerpc/include/asm/idle.h index 0efb25071d87..accd1f50085a 100644 --- a/arch/powerpc/include/asm/idle.h +++ b/arch/powerpc/include/asm/idle.h @@ -57,5 +57,37 @@ static inline void pseries_idle_epilog(void) ppc64_runlatch_on(); } +static inline u64 read_this_idle_purr(void) +{ + /* + * If we are reading from an idle context, update the + * idle-purr cycles corresponding to the last idle period. + * Since the idle context is not yet over, take a fresh + * snapshot of the idle-purr. + */ + if (unlikely(get_lppaca()->idle == 1)) { + update_idle_purr_accounting(); + snapshot_purr_idle_entry(); + } + + return be64_to_cpu(get_lppaca()->wait_state_cycles); +} + +static inline u64 read_this_idle_spurr(void) +{ + /* + * If we are reading from an idle context, update the + * idle-spurr cycles corresponding to the last idle period. + * Since the idle context is not yet over, take a fresh + * snapshot of the idle-spurr. + */ + if (get_lppaca()->idle == 1) { + update_idle_spurr_accounting(); + snapshot_spurr_idle_entry(); + } + + return *this_cpu_ptr(&idle_spurr_cycles); +} + #endif /* CONFIG_PPC_PSERIES */ #endif diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 479c70680b76..571b3259697e 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "cacheinfo.h" @@ -760,6 +761,74 @@ static void create_svm_file(void) } #endif /* CONFIG_PPC_SVM */ +#ifdef CONFIG_PPC_PSERIES +static void read_idle_purr(void *val) +{ + u64 *ret = val; + + *ret = read_this_idle_purr(); +} + +static ssize_t idle_purr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + u64 val; + + smp_call_function_single(cpu->dev.id, read_idle_purr, &val, 1); + return sprintf(buf, "%llx\n", val); +} +static DEVICE_ATTR(idle_purr, 0400, idle_purr_show, NULL); + +static void create_idle_purr_file(struct device *s) +{ + if (firmware_has_feature(FW_FEATURE_LPAR)) + device_create_file(s, &dev_attr_idle_purr); +} + +static void remove_idle_purr_file(struct device *s) +{ + if (firmware_has_feature(FW_FEATURE_LPAR)) + device_remove_file(s, &dev_attr_idle_purr); +} + +static void read_idle_spurr(void *val) +{ + u64 *ret = val; + + *ret = read_this_idle_spurr(); +} + +static ssize_t idle_spurr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + u64 val; + + smp_call_function_single(cpu->dev.id, read_idle_spurr, &val, 1); + return sprintf(buf, "%llx\n", val); +} +static DEVICE_ATTR(idle_spurr, 0400, idle_spurr_show, NULL); + +static void create_idle_spurr_file(struct device *s) +{ + if (firmware_has_feature(FW_FEATURE_LPAR)) + device_create_file(s, &dev_attr_idle_spurr); +} + +static void remove_idle_spurr_file(struct device *s) +{ + if (firmware_has_feature(FW_FEATURE_LPAR)) + device_remove_file(s, &dev_attr_idle_spurr); +} + +#else /* CONFIG_PPC_PSERIES */ +#define create_idle_purr_file(s) +#define remove_idle_purr_file(s) +#define create_idle_spurr_file(s) +#define remove_idle_spurr_file(s) +#endif /* CONFIG_PPC_PSERIES */ + 
static int register_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); @@ -823,10 +892,13 @@ static int register_cpu_online(unsigned int cpu) if (!firmware_has_feature(FW_FEATURE_LPAR)) add_write_permission_dev_attr(&dev_attr_purr); device_create_file(s, &dev_attr_purr); + create_idle_purr_file(s); } - if (cpu_has_feature(CPU_FTR_SPURR)) + if (cpu_has_feature(CPU_FTR_SPURR)) { device_create_file(s, &dev_attr_spurr); + create_idle_spurr_file(s); + } if (cpu_has_feature(CPU_FTR_DSCR)) device_create_file(s, &dev_attr_dscr); @@ -910,11 +982,15 @@ static int unregister_cpu_online(unsigned int cpu) device_remove_file(s, &dev_attr_mmcra); #endif /* CONFIG_PMU_SYSFS */ - if (cpu_has_feature(CPU_FTR_PURR)) + if (cpu_has_feature(CPU_FTR_PURR)) { device_remove_file(s, &dev_attr_purr); + remove_idle_purr_file(s); + } - if (cpu_has_feature(CPU_FTR_SPURR)) + if (cpu_has_feature(CPU_FTR_SPURR)) { device_remove_file(s, &dev_attr_spurr); + remove_idle_spurr_file(s); + } if (cpu_has_feature(CPU_FTR_DSCR)) device_remove_file(s, &dev_attr_dscr); -- cgit v1.2.3-59-g8ed1b From bde752c3d6dbe9f6ca346560198e66bc3d7d7238 Mon Sep 17 00:00:00 2001 From: "Gautham R. Shenoy" Date: Tue, 7 Apr 2020 14:17:43 +0530 Subject: Documentation: Document sysfs interfaces purr, spurr, idle_purr, idle_spurr Add documentation for the following sysfs interfaces: /sys/devices/system/cpu/cpuX/purr /sys/devices/system/cpu/cpuX/spurr /sys/devices/system/cpu/cpuX/idle_purr /sys/devices/system/cpu/cpuX/idle_spurr Signed-off-by: Gautham R. Shenoy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1586249263-14048-6-git-send-email-ego@linux.vnet.ibm.com --- Documentation/ABI/testing/sysfs-devices-system-cpu | 39 ++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 2e0e3b45d02a..b73b8b5c81f3 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -580,3 +580,42 @@ Description: Secure Virtual Machine If 1, it means the system is using the Protected Execution Facility in POWER9 and newer processors. i.e., it is a Secure Virtual Machine. + +What: /sys/devices/system/cpu/cpuX/purr +Date: Apr 2005 +Contact: Linux for PowerPC mailing list +Description: PURR ticks for this CPU since the system boot. + + The Processor Utilization Resources Register (PURR) is + a 64-bit counter which provides an estimate of the + resources used by the CPU thread. The contents of this + register increases monotonically. This sysfs interface + exposes the number of PURR ticks for cpuX. + +What: /sys/devices/system/cpu/cpuX/spurr +Date: Dec 2006 +Contact: Linux for PowerPC mailing list +Description: SPURR ticks for this CPU since the system boot. + + The Scaled Processor Utilization Resources Register + (SPURR) is a 64-bit counter that provides a frequency + invariant estimate of the resources used by the CPU + thread. The contents of this register increases + monotonically. This sysfs interface exposes the number + of SPURR ticks for cpuX. + +What: /sys/devices/system/cpu/cpuX/idle_purr +Date: Apr 2020 +Contact: Linux for PowerPC mailing list +Description: PURR ticks for cpuX when it was idle. + + This sysfs interface exposes the number of PURR ticks + for cpuX when it was idle. + +What: /sys/devices/system/cpu/cpuX/idle_spurr +Date: Apr 2020 +Contact: Linux for PowerPC mailing list +Description: SPURR ticks for cpuX when it was idle. 
+
+		This sysfs interface exposes the number of SPURR ticks
+		for cpuX when it was idle.
-- cgit v1.2.3-59-g8ed1b


From 334710b1496af8a0960e70121f850e209c20958f Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 17 Apr 2020 17:08:51 +0000
Subject: powerpc/uaccess: Implement unsafe_put_user() using 'asm goto'

unsafe_put_user() is designed to take benefit of 'asm goto'.

Instead of using the standard __put_user() approach and branching
based on the returned error, use 'asm goto' and make the exception
code branch directly to the error label. There is no code anymore
in the fixup section.

This change significantly simplifies functions using
unsafe_put_user().

Small example of the benefit with the following code:

struct test {
	u32 item1;
	u16 item2;
	u8 item3;
	u64 item4;
};

int set_test_to_user(struct test __user *test, u32 item1, u16 item2,
		     u8 item3, u64 item4)
{
	unsafe_put_user(item1, &test->item1, failed);
	unsafe_put_user(item2, &test->item2, failed);
	unsafe_put_user(item3, &test->item3, failed);
	unsafe_put_user(item4, &test->item4, failed);
	return 0;
failed:
	return -EFAULT;
}

Before the patch:

00000be8 <set_test_to_user>:
 be8:	39 20 00 00	li      r9,0
 bec:	90 83 00 00	stw     r4,0(r3)
 bf0:	2f 89 00 00	cmpwi   cr7,r9,0
 bf4:	40 9e 00 38	bne     cr7,c2c
 bf8:	b0 a3 00 04	sth     r5,4(r3)
 bfc:	2f 89 00 00	cmpwi   cr7,r9,0
 c00:	40 9e 00 2c	bne     cr7,c2c
 c04:	98 c3 00 06	stb     r6,6(r3)
 c08:	2f 89 00 00	cmpwi   cr7,r9,0
 c0c:	40 9e 00 20	bne     cr7,c2c
 c10:	90 e3 00 08	stw     r7,8(r3)
 c14:	91 03 00 0c	stw     r8,12(r3)
 c18:	21 29 00 00	subfic  r9,r9,0
 c1c:	7d 29 49 10	subfe   r9,r9,r9
 c20:	38 60 ff f2	li      r3,-14
 c24:	7d 23 18 38	and     r3,r9,r3
 c28:	4e 80 00 20	blr
 c2c:	38 60 ff f2	li      r3,-14
 c30:	4e 80 00 20	blr

00000000 <.fixup>:
	...
 b8:	39 20 ff f2	li      r9,-14
 bc:	48 00 00 00	b       bc <.fixup+0xbc>
			bc: R_PPC_REL24	.text+0xbf0
 c0:	39 20 ff f2	li      r9,-14
 c4:	48 00 00 00	b       c4 <.fixup+0xc4>
			c4: R_PPC_REL24	.text+0xbfc
 c8:	39 20 ff f2	li      r9,-14
 cc:	48 00 00 00	b       cc <.fixup+0xcc>
 d0:	39 20 ff f2	li      r9,-14
 d4:	48 00 00 00	b       d4 <.fixup+0xd4>
			d4: R_PPC_REL24	.text+0xc18

00000000 <__ex_table>:
	...
			a0: R_PPC_REL32	.text+0xbec
			a4: R_PPC_REL32	.fixup+0xb8
			a8: R_PPC_REL32	.text+0xbf8
			ac: R_PPC_REL32	.fixup+0xc0
			b0: R_PPC_REL32	.text+0xc04
			b4: R_PPC_REL32	.fixup+0xc8
			b8: R_PPC_REL32	.text+0xc10
			bc: R_PPC_REL32	.fixup+0xd0
			c0: R_PPC_REL32	.text+0xc14
			c4: R_PPC_REL32	.fixup+0xd0

After the patch:

00000be8 <set_test_to_user>:
 be8:	90 83 00 00	stw     r4,0(r3)
 bec:	b0 a3 00 04	sth     r5,4(r3)
 bf0:	98 c3 00 06	stb     r6,6(r3)
 bf4:	90 e3 00 08	stw     r7,8(r3)
 bf8:	91 03 00 0c	stw     r8,12(r3)
 bfc:	38 60 00 00	li      r3,0
 c00:	4e 80 00 20	blr
 c04:	38 60 ff f2	li      r3,-14
 c08:	4e 80 00 20	blr

00000000 <__ex_table>:
	...
			a0: R_PPC_REL32	.text+0xbe8
			a4: R_PPC_REL32	.text+0xc04
			a8: R_PPC_REL32	.text+0xbec
			ac: R_PPC_REL32	.text+0xc04
			b0: R_PPC_REL32	.text+0xbf0
			b4: R_PPC_REL32	.text+0xc04
			b8: R_PPC_REL32	.text+0xbf4
			bc: R_PPC_REL32	.text+0xc04
			c0: R_PPC_REL32	.text+0xbf8
			c4: R_PPC_REL32	.text+0xc04

Signed-off-by: Christophe Leroy
Reviewed-by: Segher Boessenkool
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/23e680624680a9a5405f4b88740d2596d4b17c26.1587143308.git.christophe.leroy@c-s.fr
---
 arch/powerpc/include/asm/uaccess.h | 61 ++++++++++++++++++++++++++++++++------
 1 file changed, 52 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 0969285996cb..3f30a1dbc198 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -93,12 +93,12 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
 #define __get_user(x, ptr) \
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
 #define __put_user(x, ptr) \
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __put_user_goto(x, ptr, label) \
+	__put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

 #define __get_user_allowed(x, ptr) \
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
-#define __put_user_allowed(x, ptr) \
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)

 #define __get_user_inatomic(x, ptr) \
 	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
@@ -162,7 +162,7 @@ do {								\
 	prevent_write_to_user(ptr, size);			\
 } while (0)

-#define __put_user_nocheck(x, ptr, size, do_allow)		\
+#define __put_user_nocheck(x, ptr, size)			\
 ({								\
 	long __pu_err;						\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
@@ -172,10 +172,7 @@ do {								\
 	if (!is_kernel_addr((unsigned long)__pu_addr))		\
 		might_fault();					\
 	__chk_user_ptr(__pu_addr);				\
-	if (do_allow)							   \
-		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
-	else								   \
-		__put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \
+	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
								\
 	__pu_err;						\
 })
@@ -208,6 +205,52 @@ do {								\
 })

+#define __put_user_asm_goto(x, addr, label, op)			\
+	asm volatile goto(					\
+		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
+		EX_TABLE(1b, %l2)				\
+		:						\
+		: "r" (x), "m<>" (*addr)			\
+		:						\
+		: label)
+
+#ifdef __powerpc64__
+#define __put_user_asm2_goto(x, ptr, label)			\
+	__put_user_asm_goto(x, ptr, label, "std")
+#else /* __powerpc64__ */
+#define __put_user_asm2_goto(x, addr, label)			\
+	asm volatile goto(					\
+		"1:	stw%X1 %0, %1\n"			\
+		"2:	stw%X1 %L0, %L1\n"			\
+		EX_TABLE(1b, %l2)				\
+		EX_TABLE(2b, %l2)				\
+		:						\
+		: "r" (x), "m" (*addr)				\
+		:						\
+		: label)
+#endif /* __powerpc64__ */
+
+#define __put_user_size_goto(x, ptr, size, label)		\
+do {								\
+	switch (size) {						\
+	case 1: __put_user_asm_goto(x, ptr, label, "stb"); break;	\
+	case 2: __put_user_asm_goto(x, ptr, label, "sth"); break;	\
+	case 4: __put_user_asm_goto(x, ptr, label, "stw"); break;	\
+	case 8: __put_user_asm2_goto(x, ptr, label); break;	\
+	default: __put_user_bad();				\
+	}							\
+} while (0)
+
+#define __put_user_nocheck_goto(x, ptr, size, label)		\
+do {								\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	if (!is_kernel_addr((unsigned long)__pu_addr))		\
+		might_fault();					\
+	__chk_user_ptr(ptr);					\
+	__put_user_size_goto((x), __pu_addr, (size), label);	\
+} while (0)
+
+
 extern long __get_user_bad(void);

 /*
@@ -491,7 +534,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t

 #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
 #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
-#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
+#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)

 #define unsafe_copy_to_user(d, s, l, e) \
 	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
-- cgit v1.2.3-59-g8ed1b


From 17bc43367fc2a720400d21c745db641c654c1e6b Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 17 Apr 2020 17:08:52 +0000
Subject: powerpc/uaccess: Implement unsafe_copy_to_user() as a simple loop

At the time being, unsafe_copy_to_user() is based on
raw_copy_to_user(), which calls __copy_tofrom_user().
__copy_tofrom_user() is a big optimised function for copying large
amounts of data. It aligns destinations to the cache line in order to
use the dcbz instruction.

Today unsafe_copy_to_user() is called only from filldir(). It is
mainly used to copy small amounts of data like filenames, so
__copy_tofrom_user() is not a good fit.

Also, unsafe_copy_to_user() is used within user_access_begin/end
sections. In those sections, it is preferable not to call functions.

Rewrite unsafe_copy_to_user() as a macro that uses __put_user_goto().
We first perform a loop over longs, then finish with the necessary
complements.

unsafe_copy_to_user() might be used in the near future to copy
fixed-size data, like pt_regs structs during signal processing.
Having it as a macro allows GCC to optimise it: for instance, when it
knows the size in advance, it can unroll loops and drop complements
when the size is a multiple of longs, etc.
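
For readers without a PPC toolchain at hand, the shape of the macro can be
sketched as an ordinary user-space function, with memcpy standing in for
__put_user_goto and a sizeof(long) test standing in for the CONFIG_PPC64
check. This is illustrative only, not the kernel code:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Bulk-copy as longs, then mop up a 4/2/1-byte tail, mirroring the
 * structure of the unsafe_copy_to_user() macro described above.
 */
static void copy_like_unsafe_copy_to_user(void *d, const void *s, size_t len)
{
	unsigned char *dst = d;
	const unsigned char *src = s;
	size_t i;

	for (i = 0; i < (len & ~(sizeof(long) - 1)); i += sizeof(long))
		memcpy(dst + i, src + i, sizeof(long));
	if (sizeof(long) == 8 && (len & 4)) {	/* the 64-bit-only complement */
		memcpy(dst + i, src + i, 4);
		i += 4;
	}
	if (len & 2) {
		memcpy(dst + i, src + i, 2);
		i += 2;
	}
	if (len & 1)
		dst[i] = src[i];
}

int main(void)
{
	char out[16] = { 0 };

	copy_like_unsafe_copy_to_user(out, "hello, world!", 13);
	printf("%s\n", out);	/* hello, world! */
	return 0;
}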
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/fe952112c29bf6a0a2778c9e6bbb4f4afd2c4258.1587143308.git.christophe.leroy@c-s.fr --- arch/powerpc/include/asm/uaccess.h | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 3f30a1dbc198..42b6c44e36a7 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -535,7 +535,26 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e) #define unsafe_put_user(x, p, e) __put_user_goto(x, p, e) + #define unsafe_copy_to_user(d, s, l, e) \ - unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e) +do { \ + u8 __user *_dst = (u8 __user *)(d); \ + const u8 *_src = (const u8 *)(s); \ + size_t _len = (l); \ + int _i; \ + \ + for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long)) \ + __put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\ + if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) { \ + __put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \ + _i += 4; \ + } \ + if (_len & 2) { \ + __put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \ + _i += 2; \ + } \ + if (_len & 1) \ + __put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\ +} while (0) #endif /* _ARCH_POWERPC_UACCESS_H */ -- cgit v1.2.3-59-g8ed1b From 4fe5cda9f89d0aea8e915b7c96ae34bda4e12e51 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 3 Apr 2020 07:20:53 +0000 Subject: powerpc/uaccess: Implement user_read_access_begin and user_write_access_begin Add support for selective read or write user access with user_read_access_begin/end and user_write_access_begin/end. Signed-off-by: Christophe Leroy Reviewed-by: Kees Cook Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/6c83af0f0809ef2a955c39ac622767f6cbede035.1585898438.git.christophe.leroy@c-s.fr --- arch/powerpc/include/asm/book3s/32/kup.h | 4 ++-- arch/powerpc/include/asm/kup.h | 14 +++++++++++++- arch/powerpc/include/asm/uaccess.h | 22 ++++++++++++++++++++++ 3 files changed, 37 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index 3c0ba22dc360..1617e73bee30 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -108,7 +108,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user u32 addr, end; BUILD_BUG_ON(!__builtin_constant_p(dir)); - BUILD_BUG_ON(dir == KUAP_CURRENT); + BUILD_BUG_ON(dir & ~KUAP_READ_WRITE); if (!(dir & KUAP_WRITE)) return; @@ -131,7 +131,7 @@ static __always_inline void prevent_user_access(void __user *to, const void __us BUILD_BUG_ON(!__builtin_constant_p(dir)); - if (dir == KUAP_CURRENT) { + if (dir & KUAP_CURRENT_WRITE) { u32 kuap = current->thread.kuap; if (unlikely(!kuap)) diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index 92bcd1a26d73..c745ee41ad66 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -10,7 +10,9 @@ * Use the current saved situation instead of the to/from/size params. 
* Used on book3s/32 */ -#define KUAP_CURRENT 4 +#define KUAP_CURRENT_READ 4 +#define KUAP_CURRENT_WRITE 8 +#define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE) #ifdef CONFIG_PPC64 #include @@ -101,6 +103,16 @@ static inline void prevent_current_access_user(void) prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT); } +static inline void prevent_current_read_from_user(void) +{ + prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_READ); +} + +static inline void prevent_current_write_to_user(void) +{ + prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_WRITE); +} + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_KUAP_H_ */ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 42b6c44e36a7..62cc8d7640ec 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -532,6 +532,28 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t #define user_access_save prevent_user_access_return #define user_access_restore restore_user_access +static __must_check inline bool +user_read_access_begin(const void __user *ptr, size_t len) +{ + if (unlikely(!access_ok(ptr, len))) + return false; + allow_read_from_user(ptr, len); + return true; +} +#define user_read_access_begin user_read_access_begin +#define user_read_access_end prevent_current_read_from_user + +static __must_check inline bool +user_write_access_begin(const void __user *ptr, size_t len) +{ + if (unlikely(!access_ok(ptr, len))) + return false; + allow_write_to_user((void __user *)ptr, len); + return true; +} +#define user_write_access_begin user_write_access_begin +#define user_write_access_end prevent_current_write_to_user + #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e) #define unsafe_put_user(x, p, e) __put_user_goto(x, p, e) -- cgit v1.2.3-59-g8ed1b From 43c8a496fa37187b54f7df71fb8262acc6bf6200 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Wed, 29 Apr 2020 18:00:48 +0800 Subject: powerpc/ps3: Move static keyword to the front of declaration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the static keyword to the front of declaration of 'vuart_bus_priv', and resolve the following compiler warning that can be seen when building with warnings enabled (W=1): drivers/ps3/ps3-vuart.c:867:1: warning: ‘static’ is not at beginning of declaration [-Wold-style-declaration] } static vuart_bus_priv; ^ Reported-by: Hulk Robot Signed-off-by: Xiongfeng Wang Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1588154448-56759-1-git-send-email-wangxiongfeng2@huawei.com --- drivers/ps3/ps3-vuart.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c index ddaa5ea5801a..8e80e0933a1b 100644 --- a/drivers/ps3/ps3-vuart.c +++ b/drivers/ps3/ps3-vuart.c @@ -858,13 +858,13 @@ static int ps3_vuart_handle_port_interrupt(struct ps3_system_bus_device *dev) return 0; } -struct vuart_bus_priv { +static struct vuart_bus_priv { struct ports_bmp *bmp; unsigned int virq; struct mutex probe_mutex; int use_count; struct ps3_system_bus_device *devices[PORT_COUNT]; -} static vuart_bus_priv; +} vuart_bus_priv; /** * ps3_vuart_irq_handler - first stage interrupt handler -- cgit v1.2.3-59-g8ed1b From 02c04e374e176ae3a3f64a682f80702f8d2fb65d Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Mon, 20 Apr 2020 14:26:09 +0530 Subject: 
powerpc/fadump: use static allocation for reserved memory ranges At times, memory ranges have to be looked up during early boot, when kernel couldn't be initialized for dynamic memory allocation. In fact, reserved-ranges look up is needed during FADump memory reservation. Without accounting for reserved-ranges in reserving memory for FADump, MPIPL boot fails with memory corruption issues. So, extend memory ranges handling to support static allocation and populate reserved memory ranges during early boot. Fixes: dda9dbfeeb7a ("powerpc/fadump: consider reserved ranges while releasing memory") Cc: stable@vger.kernel.org Signed-off-by: Hari Bathini Reviewed-by: Mahesh Salgaonkar Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/158737294432.26700.4830263187856221314.stgit@hbathini.in.ibm.com --- arch/powerpc/include/asm/fadump-internal.h | 4 +- arch/powerpc/kernel/fadump.c | 77 +++++++++++++++++------------- 2 files changed, 48 insertions(+), 33 deletions(-) diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h index c814a2b55389..8d61c8f3fec4 100644 --- a/arch/powerpc/include/asm/fadump-internal.h +++ b/arch/powerpc/include/asm/fadump-internal.h @@ -64,12 +64,14 @@ struct fadump_memory_range { }; /* fadump memory ranges info */ +#define RNG_NAME_SZ 16 struct fadump_mrange_info { - char name[16]; + char name[RNG_NAME_SZ]; struct fadump_memory_range *mem_ranges; u32 mem_ranges_sz; u32 mem_range_cnt; u32 max_mem_ranges; + bool is_static; }; /* Platform specific callback functions */ diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 59e60a9a9f5c..679277b28aef 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -40,8 +40,17 @@ struct kobject *fadump_kobj; #ifndef CONFIG_PRESERVE_FA_DUMP static DEFINE_MUTEX(fadump_mutex); -struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 }; -struct fadump_mrange_info reserved_mrange_info = { "reserved", NULL, 0, 0, 0 }; +struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false }; + +#define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */ +#define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \ + sizeof(struct fadump_memory_range)) +static struct fadump_memory_range rngs[RESERVED_RNGS_CNT]; +struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs, + RESERVED_RNGS_SZ, 0, + RESERVED_RNGS_CNT, true }; + +static void __init early_init_dt_scan_reserved_ranges(unsigned long node); #ifdef CONFIG_CMA static struct cma *fadump_cma; @@ -110,6 +119,11 @@ static int __init fadump_cma_init(void) { return 1; } int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname, int depth, void *data) { + if (depth == 0) { + early_init_dt_scan_reserved_ranges(node); + return 0; + } + if (depth != 1) return 0; @@ -728,10 +742,14 @@ void fadump_free_cpu_notes_buf(void) static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info) { + if (mrange_info->is_static) { + mrange_info->mem_range_cnt = 0; + return; + } + kfree(mrange_info->mem_ranges); - mrange_info->mem_ranges = NULL; - mrange_info->mem_ranges_sz = 0; - mrange_info->max_mem_ranges = 0; + memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0, + (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ)); } /* @@ -788,6 +806,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info, if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) { int ret; + if (mrange_info->is_static) { + pr_err("Reached array size limit for %s 
memory ranges\n", + mrange_info->name); + return -ENOSPC; + } + ret = fadump_alloc_mem_ranges(mrange_info); if (ret) return ret; @@ -1204,20 +1228,19 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info) * Scan reserved-ranges to consider them while reserving/releasing * memory for FADump. */ -static inline int fadump_scan_reserved_mem_ranges(void) +static void __init early_init_dt_scan_reserved_ranges(unsigned long node) { - struct device_node *root; const __be32 *prop; int len, ret = -1; unsigned long i; - root = of_find_node_by_path("/"); - if (!root) - return ret; + /* reserved-ranges already scanned */ + if (reserved_mrange_info.mem_range_cnt != 0) + return; - prop = of_get_property(root, "reserved-ranges", &len); + prop = of_get_flat_dt_prop(node, "reserved-ranges", &len); if (!prop) - return ret; + return; /* * Each reserved range is an (address,size) pair, 2 cells each, @@ -1239,7 +1262,8 @@ static inline int fadump_scan_reserved_mem_ranges(void) } } - return ret; + /* Compact reserved ranges */ + sort_and_merge_mem_ranges(&reserved_mrange_info); } /* @@ -1253,32 +1277,21 @@ static void fadump_release_memory(u64 begin, u64 end) u64 ra_start, ra_end, tstart; int i, ret; - fadump_scan_reserved_mem_ranges(); - ra_start = fw_dump.reserve_dump_area_start; ra_end = ra_start + fw_dump.reserve_dump_area_size; /* - * Add reserved dump area to reserved ranges list - * and exclude all these ranges while releasing memory. + * If reserved ranges array limit is hit, overwrite the last reserved + * memory range with reserved dump area to ensure it is excluded from + * the memory being released (reused for next FADump registration). */ - ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end); - if (ret != 0) { - /* - * Not enough memory to setup reserved ranges but the system is - * running shortage of memory. So, release all the memory except - * Reserved dump area (reused for next fadump registration). - */ - if (begin < ra_end && end > ra_start) { - if (begin < ra_start) - fadump_release_reserved_area(begin, ra_start); - if (end > ra_end) - fadump_release_reserved_area(ra_end, end); - } else - fadump_release_reserved_area(begin, end); + if (reserved_mrange_info.mem_range_cnt == + reserved_mrange_info.max_mem_ranges) + reserved_mrange_info.mem_range_cnt--; + ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end); + if (ret != 0) return; - } /* Get the reserved ranges list in order first. */ sort_and_merge_mem_ranges(&reserved_mrange_info); -- cgit v1.2.3-59-g8ed1b From 140777a3d8dfdb3d3f20ea7707c0f1c0ce1b0aa5 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Mon, 20 Apr 2020 14:26:22 +0530 Subject: powerpc/fadump: consider reserved ranges while reserving memory Commit 0962e8004e97 ("powerpc/prom: Scan reserved-ranges node for memory reservations") enabled support to parse reserved-ranges DT node and reserve kernel memory falling in these ranges for F/W purposes. Memory reserved for FADump should not overlap with these ranges as it could corrupt memory meant for F/W or crash'ed kernel memory to be exported as vmcore. But since commit 579ca1a27675 ("powerpc/fadump: make use of memblock's bottom up allocation mode"), memblock_find_in_range() is being used to find the appropriate area to reserve memory for FADump, which can't account for reserved-ranges as these ranges are reserved only after FADump memory reservation. With reserved-ranges now being populated during early boot, look out for these memory ranges while reserving memory for FADump. 
Without this change, MPIPL on PowerNV systems aborts with hostboot failure, when memory reserved for FADump is less than 4096MB. Fixes: 579ca1a27675 ("powerpc/fadump: make use of memblock's bottom up allocation mode") Cc: stable@vger.kernel.org Signed-off-by: Hari Bathini Reviewed-by: Mahesh Salgaonkar Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/158737297693.26700.16193820746269425424.stgit@hbathini.in.ibm.com --- arch/powerpc/kernel/fadump.c | 76 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 67 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 679277b28aef..63aac8b5f233 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -445,10 +445,72 @@ static int __init fadump_get_boot_mem_regions(void) return ret; } +/* + * Returns true, if the given range overlaps with reserved memory ranges + * starting at idx. Also, updates idx to index of overlapping memory range + * with the given memory range. + * False, otherwise. + */ +static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx) +{ + bool ret = false; + int i; + + for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) { + u64 rbase = reserved_mrange_info.mem_ranges[i].base; + u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size; + + if (end <= rbase) + break; + + if ((end > rbase) && (base < rend)) { + *idx = i; + ret = true; + break; + } + } + + return ret; +} + +/* + * Locate a suitable memory area to reserve memory for FADump. While at it, + * lookup reserved-ranges & avoid overlap with them, as they are used by F/W. + */ +static u64 __init fadump_locate_reserve_mem(u64 base, u64 size) +{ + struct fadump_memory_range *mrngs; + phys_addr_t mstart, mend; + int idx = 0; + u64 i, ret = 0; + + mrngs = reserved_mrange_info.mem_ranges; + for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, + &mstart, &mend, NULL) { + pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n", + i, mstart, mend, base); + + if (mstart > base) + base = PAGE_ALIGN(mstart); + + while ((mend > base) && ((mend - base) >= size)) { + if (!overlaps_reserved_ranges(base, base+size, &idx)) { + ret = base; + goto out; + } + + base = mrngs[idx].base + mrngs[idx].size; + base = PAGE_ALIGN(base); + } + } + +out: + return ret; +} + int __init fadump_reserve_mem(void) { - u64 base, size, mem_boundary, bootmem_min, align = PAGE_SIZE; - bool is_memblock_bottom_up = memblock_bottom_up(); + u64 base, size, mem_boundary, bootmem_min; int ret = 1; if (!fw_dump.fadump_enabled) @@ -469,9 +531,9 @@ int __init fadump_reserve_mem(void) PAGE_ALIGN(fadump_calculate_reserve_size()); #ifdef CONFIG_CMA if (!fw_dump.nocma) { - align = FADUMP_CMA_ALIGNMENT; fw_dump.boot_memory_size = - ALIGN(fw_dump.boot_memory_size, align); + ALIGN(fw_dump.boot_memory_size, + FADUMP_CMA_ALIGNMENT); } #endif @@ -539,11 +601,7 @@ int __init fadump_reserve_mem(void) * Reserve memory at an offset closer to bottom of the RAM to * minimize the impact of memory hot-remove operation. 
*/ - memblock_set_bottom_up(true); - base = memblock_find_in_range(base, mem_boundary, size, align); - - /* Restore the previous allocation mode */ - memblock_set_bottom_up(is_memblock_bottom_up); + base = fadump_locate_reserve_mem(base, size); if (!base) { pr_err("Failed to find memory chunk for reservation!\n"); -- cgit v1.2.3-59-g8ed1b From 93a98695f2f9f9c48a29ab6249334fdc6e9722cb Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:28 +0530 Subject: mm: change pmdp_huge_get_and_clear_full take vm_area_struct as arg We will use this in later patch to do tlb flush when clearing pmd entries. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-22-aneesh.kumar@linux.ibm.com --- arch/s390/include/asm/pgtable.h | 4 ++-- include/asm-generic/pgtable.h | 4 ++-- mm/huge_memory.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 6076c8c912d2..e2528e057980 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1560,7 +1560,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, } #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL -static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, +static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp, int full) { @@ -1569,7 +1569,7 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY); return pmd; } - return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); + return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); } #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 329b8c8ca703..d10be362eafa 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -159,11 +159,11 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL -static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, +static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, int full) { - return pmdp_huge_get_and_clear(mm, address, pmdp); + return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); } #endif diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 6ecd1045113b..16f2bd6f1549 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1852,8 +1852,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, * pgtable_trans_huge_withdraw after finishing pmdp related * operations. */ - orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, - tlb->fullmm); + orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, + tlb->fullmm); tlb_remove_pmd_tlb_entry(tlb, pmd, addr); if (vma_is_special_huge(vma)) { if (arch_needs_pgtable_deposit()) -- cgit v1.2.3-59-g8ed1b From fe4a6856cb4f4353a6cb8d3629bcfe9204e3d57d Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:08 +0530 Subject: powerpc/pkeys: Avoid using lockless page table walk Fetch pkey from vma instead of linux page table. Also document the fact that in some cases the pkey returned in siginfo won't be the same as the one we took keyfault on. Even with linux page table walk, we can end up in a similar scenario. 
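Roughly, the new reporting path looks like this (sketch only; vma_pkey() is the real helper, the wrapper name here is made up for illustration and the actual change is in the fault.c hunks below):

	static int pkey_for_siginfo(struct vm_area_struct *vma)
	{
		/*
		 * vma_pkey() reads the key encoded in vma->vm_flags, which
		 * is a self-consistent value while mmap_sem is held for
		 * read, unlike a lockless pte read that may race with
		 * mprotect_key() on another thread.
		 */
		return vma_pkey(vma);
	}
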
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-2-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/mmu.h | 9 ---- arch/powerpc/mm/book3s64/hash_utils.c | 24 ---------- arch/powerpc/mm/fault.c | 83 +++++++++++++++++++++++++---------- 3 files changed, 60 insertions(+), 56 deletions(-) diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 0699cfeeb8c9..cf2a08bfd5cd 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -291,15 +291,6 @@ static inline bool early_radix_enabled(void) } #endif -#ifdef CONFIG_PPC_MEM_KEYS -extern u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address); -#else -static inline u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address) -{ - return 0; -} -#endif /* CONFIG_PPC_MEM_KEYS */ - #ifdef CONFIG_STRICT_KERNEL_RWX static inline bool strict_kernel_rwx_enabled(void) { diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 8ed2411c3f39..e951e87a974d 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1671,30 +1671,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, hash_preload(vma->vm_mm, address, is_exec, trap); } -#ifdef CONFIG_PPC_MEM_KEYS -/* - * Return the protection key associated with the given address and the - * mm_struct. - */ -u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address) -{ - pte_t *ptep; - u16 pkey = 0; - unsigned long flags; - - if (!mm || !mm->pgd) - return 0; - - local_irq_save(flags); - ptep = find_linux_pte(mm->pgd, address, NULL, NULL); - if (ptep) - pkey = pte_to_pkey_bits(pte_val(READ_ONCE(*ptep))); - local_irq_restore(flags); - - return pkey; -} -#endif /* CONFIG_PPC_MEM_KEYS */ - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM static inline void tm_flush_hash_page(int local) { diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 84af6c8eecf7..8e529e4708e1 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -118,9 +118,34 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address) return __bad_area(regs, address, SEGV_MAPERR); } -static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address, - int pkey) +#ifdef CONFIG_PPC_MEM_KEYS +static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, + struct vm_area_struct *vma) { + struct mm_struct *mm = current->mm; + int pkey; + + /* + * We don't try to fetch the pkey from page table because reading + * page table without locking doesn't guarantee stable pte value. + * Hence the pkey value that we return to userspace can be different + * from the pkey that actually caused access error. + * + * It does *not* guarantee that the VMA we find here + * was the one that we faulted on. + * + * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); + * 2. T1 : set AMR to deny access to pkey=4, touches, page + * 3. T1 : faults... + * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); + * 5. T1 : enters fault handler, takes mmap_sem, etc... + * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really + * faulted on a pte with its pkey=4. 
+ */ + pkey = vma_pkey(vma); + + up_read(&mm->mmap_sem); + /* * If we are in kernel mode, bail out with a SEGV, this will * be caught by the assembly which will restore the non-volatile @@ -133,6 +158,7 @@ static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address, return 0; } +#endif static noinline int bad_access(struct pt_regs *regs, unsigned long address) { @@ -289,8 +315,31 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, return false; } -static bool access_error(bool is_write, bool is_exec, - struct vm_area_struct *vma) +#ifdef CONFIG_PPC_MEM_KEYS +static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey, + struct vm_area_struct *vma) +{ + /* + * Read or write was blocked by protection keys. This is + * always an unconditional error and can never result in + * a follow-up action to resolve the fault, like a COW. + */ + if (is_pkey) + return true; + + /* + * Make sure to check the VMA so that we do not perform + * faults just to hit a pkey fault as soon as we fill in a + * page. Only called for current mm, hence foreign == 0 + */ + if (!arch_vma_access_permitted(vma, is_write, is_exec, 0)) + return true; + + return false; +} +#endif + +static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma) { /* * Allow execution from readable areas if the MMU does not @@ -483,10 +532,6 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); - if (error_code & DSISR_KEYFAULT) - return bad_key_fault_exception(regs, address, - get_mm_addr_key(mm, address)); - /* * We want to do this outside mmap_sem, because reading code around nip * can result in fault, which will cause a deadlock when called with @@ -555,6 +600,13 @@ retry: return bad_area(regs, address); good_area: + +#ifdef CONFIG_PPC_MEM_KEYS + if (unlikely(access_pkey_error(is_write, is_exec, + (error_code & DSISR_KEYFAULT), vma))) + return bad_access_pkey(regs, address, vma); +#endif /* CONFIG_PPC_MEM_KEYS */ + if (unlikely(access_error(is_write, is_exec, vma))) return bad_access(regs, address); @@ -565,21 +617,6 @@ good_area: */ fault = handle_mm_fault(vma, address, flags); -#ifdef CONFIG_PPC_MEM_KEYS - /* - * we skipped checking for access error due to key earlier. - * Check that using handle_mm_fault error return. - */ - if (unlikely(fault & VM_FAULT_SIGSEGV) && - !arch_vma_access_permitted(vma, is_write, is_exec, 0)) { - - int pkey = vma_pkey(vma); - - up_read(&mm->mmap_sem); - return bad_key_fault_exception(regs, address, pkey); - } -#endif /* CONFIG_PPC_MEM_KEYS */ - major |= fault & VM_FAULT_MAJOR; if (fault_signal_pending(fault, regs)) -- cgit v1.2.3-59-g8ed1b From c46241a370a61f0f264791abb9fc869016e749ce Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:09 +0530 Subject: powerpc/pkeys: Check vma before returning key fault error to the user If multiple threads in userspace keep changing the protection keys mapping a range, there can be a scenario where kernel takes a key fault but the pkey value found in the siginfo struct is a permissive one. This can confuse the userspace as shown in the below test case. 
/* use this to control the number of test iterations */ static void pkeyreg_set(int pkey, unsigned long rights) { unsigned long reg, shift; shift = (NR_PKEYS - pkey - 1) * PKEY_BITS_PER_PKEY; asm volatile("mfspr %0, 0xd" : "=r"(reg)); reg &= ~(((unsigned long) PKEY_BITS_MASK) << shift); reg |= (rights & PKEY_BITS_MASK) << shift; asm volatile("mtspr 0xd, %0" : : "r"(reg)); } static unsigned long pkeyreg_get(void) { unsigned long reg; asm volatile("mfspr %0, 0xd" : "=r"(reg)); return reg; } static int sys_pkey_mprotect(void *addr, size_t len, int prot, int pkey) { return syscall(SYS_pkey_mprotect, addr, len, prot, pkey); } static int sys_pkey_alloc(unsigned long flags, unsigned long access_rights) { return syscall(SYS_pkey_alloc, flags, access_rights); } static int sys_pkey_free(int pkey) { return syscall(SYS_pkey_free, pkey); } static int faulting_pkey; static int permissive_pkey; static pthread_barrier_t pkey_set_barrier; static pthread_barrier_t mprotect_barrier; static void pkey_handle_fault(int signum, siginfo_t *sinfo, void *ctx) { unsigned long pkeyreg; /* FIXME: printf is not signal-safe but for the current purpose, it gets the job done. */ printf("pkey: exp = %d, got = %d\n", faulting_pkey, sinfo->si_pkey); fflush(stdout); assert(sinfo->si_code == SEGV_PKUERR); assert(sinfo->si_pkey == faulting_pkey); /* clear pkey permissions to let the faulting instruction continue */ pkeyreg_set(faulting_pkey, 0x0); } static void *do_mprotect_fault(void *p) { unsigned long rights, pkeyreg, pgsize; unsigned int i; void *region; int pkey; srand(time(NULL)); pgsize = sysconf(_SC_PAGESIZE); rights = PKEY_DISABLE_WRITE; region = p; /* allocate key, no permissions */ assert((pkey = sys_pkey_alloc(0, PKEY_DISABLE_ACCESS)) > 0); pkeyreg_set(4, 0x0); /* cache the pkey here as the faulting pkey for future reference in the signal handler */ faulting_pkey = pkey; printf("%s: faulting pkey = %d\n", __func__, faulting_pkey); /* try to allocate, mprotect and free pkeys repeatedly */ for (i = 0; i < NUM_ITERATIONS; i++) { /* sync up with the other thread here */ pthread_barrier_wait(&pkey_set_barrier); /* make sure that the pkey used by the non-faulting thread is made permissive for this thread's context too so that no faults are triggered because it still might have been set to a restrictive value */ // pkeyreg_set(permissive_pkey, 0x0); /* sync up with the other thread here */ pthread_barrier_wait(&mprotect_barrier); /* perform mprotect */ assert(!sys_pkey_mprotect(region, pgsize, PROT_READ | PROT_WRITE, pkey)); /* choose a random byte from the protected region and attempt to write to it, this will generate a fault */ *((char *) region + (rand() % pgsize)) = rand(); /* restore pkey permissions as the signal handler may have cleared the bit out for the sake of continuing */ pkeyreg_set(pkey, PKEY_DISABLE_WRITE); } /* free pkey */ sys_pkey_free(pkey); return NULL; } static void *do_mprotect_nofault(void *p) { unsigned long pgsize; unsigned int i, j; void *region; int pkey; pgsize = sysconf(_SC_PAGESIZE); region = p; /* try to allocate, mprotect and free pkeys repeatedly */ for (i = 0; i < NUM_ITERATIONS; i++) { /* allocate pkey, all permissions */ assert((pkey = sys_pkey_alloc(0, 0)) > 0); permissive_pkey = pkey; /* sync up with the other thread here */ pthread_barrier_wait(&pkey_set_barrier); pthread_barrier_wait(&mprotect_barrier); /* perform mprotect on the common page, no faults will be triggered as this is most permissive */ assert(!sys_pkey_mprotect(region, pgsize, PROT_READ | PROT_WRITE, pkey)); /* free 
pkey */ assert(!sys_pkey_free(pkey)); } return NULL; } int main(int argc, char **argv) { pthread_t fault_thread, nofault_thread; unsigned long pgsize; struct sigaction act; pthread_attr_t attr; cpu_set_t fault_cpuset, nofault_cpuset; unsigned int i; void *region; /* allocate memory region to protect */ pgsize = sysconf(_SC_PAGESIZE); assert(region = memalign(pgsize, pgsize)); CPU_ZERO(&fault_cpuset); CPU_SET(0, &fault_cpuset); CPU_ZERO(&nofault_cpuset); CPU_SET(8, &nofault_cpuset); assert(!pthread_attr_init(&attr)); /* setup sigsegv signal handler */ act.sa_handler = 0; act.sa_sigaction = pkey_handle_fault; assert(!sigprocmask(SIG_SETMASK, 0, &act.sa_mask)); act.sa_flags = SA_SIGINFO; act.sa_restorer = 0; assert(!sigaction(SIGSEGV, &act, NULL)); /* setup barrier for the two threads */ pthread_barrier_init(&pkey_set_barrier, NULL, 2); pthread_barrier_init(&mprotect_barrier, NULL, 2); /* setup and start threads */ assert(!pthread_create(&fault_thread, &attr, &do_mprotect_fault, region)); assert(!pthread_setaffinity_np(fault_thread, sizeof(cpu_set_t), &fault_cpuset)); assert(!pthread_create(&nofault_thread, &attr, &do_mprotect_nofault, region)); assert(!pthread_setaffinity_np(nofault_thread, sizeof(cpu_set_t), &nofault_cpuset)); /* cleanup */ assert(!pthread_attr_destroy(&attr)); assert(!pthread_join(fault_thread, NULL)); assert(!pthread_join(nofault_thread, NULL)); assert(!pthread_barrier_destroy(&pkey_set_barrier)); assert(!pthread_barrier_destroy(&mprotect_barrier)); free(region); puts("PASS"); return EXIT_SUCCESS; } The above test can result in the below failure without this patch. pkey: exp = 3, got = 3 pkey: exp = 3, got = 4 a.out: pkey-siginfo-race.c:100: pkey_handle_fault: Assertion `sinfo->si_pkey == faulting_pkey' failed. Aborted Check for vma access before considering this a key fault. If the vma pkey allows access, retry the access again. The test case was written by Sandipan Das, hence his SOB is added. Signed-off-by: Sandipan Das Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-3-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/fault.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 8e529e4708e1..44457bae77a0 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -319,14 +319,6 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey, struct vm_area_struct *vma) { - /* - * Read or write was blocked by protection keys. This is - * always an unconditional error and can never result in - * a follow-up action to resolve the fault, like a COW. - */ - if (is_pkey) - return true; - /* * Make sure to check the VMA so that we do not perform * faults just to hit a pkey fault as soon as we fill in a -- cgit v1.2.3-59-g8ed1b From ec4abf1e70cf6a3fe6e571d640260005c997c6e1 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:10 +0530 Subject: powerpc/mm/hash64: use _PAGE_PTE when checking for pte_present This makes the pte_present check stricter by checking for the additional _PAGE_PTE bit. A level 1 pte pointer (THP pte) can be switched to a pointer to a level 0 pte page table page by the following two operations. 1) THP split. 2) madvise(MADV_DONTNEED) in parallel to page fault. A lockless page table walk needs to make sure we can handle such changes gracefully.
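To illustrate, a lockless walker would consume the stricter check roughly like this (sketch only, with a made-up helper name; the real change is the pte_present() hunk below):

	static pte_t *check_lockless_pte(pte_t *ptep)
	{
		pte_t pte = READ_ONCE(*ptep);	/* one snapshot of the entry */

		/*
		 * pte_present() now also demands _PAGE_PTE, so an entry
		 * that a parallel THP split or madvise(MADV_DONTNEED)
		 * turned back into a page table pointer fails the check
		 * instead of being treated as a valid translation.
		 */
		if (!pte_present(pte))
			return NULL;	/* caller must bail out or retry */
		return ptep;
	}
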
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-4-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/pgtable.h | 15 ++++++++++----- arch/powerpc/mm/book3s64/hash_utils.c | 11 +++++++++-- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 368b136517e0..03521a8b0292 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -553,6 +553,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte) } #endif /* CONFIG_NUMA_BALANCING */ +static inline bool pte_hw_valid(pte_t pte) +{ + return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE)) == + cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE); +} + static inline int pte_present(pte_t pte) { /* @@ -561,12 +567,11 @@ static inline int pte_present(pte_t pte) * invalid during ptep_set_access_flags. Hence we look for _PAGE_INVALID * if we find _PAGE_PRESENT cleared. */ - return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)); -} -static inline bool pte_hw_valid(pte_t pte) -{ - return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT)); + if (pte_hw_valid(pte)) + return true; + return (pte_raw(pte) & cpu_to_be64(_PAGE_INVALID | _PAGE_PTE)) == + cpu_to_be64(_PAGE_INVALID | _PAGE_PTE); } #ifdef CONFIG_PPC_MEM_KEYS diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index e951e87a974d..525eac4ee2c2 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1350,8 +1350,15 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, goto bail; } - /* Add _PAGE_PRESENT to the required access perm */ - access |= _PAGE_PRESENT; + /* + * Add _PAGE_PRESENT to the required access perm. If there are parallel + * updates to the pte that can possibly clear _PAGE_PTE, catch that too. + * + * We can safely use the return pte address in rest of the function + * because we do set H_PAGE_BUSY which prevents further updates to pte + * from generic code. + */ + access |= _PAGE_PRESENT | _PAGE_PTE; /* * Pre-check access permissions (will be re-checked atomically -- cgit v1.2.3-59-g8ed1b From 7900757ce1b4affda1591aa3fd073e27d202f406 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:11 +0530 Subject: powerpc/hash64: Restrict page table lookup using init_mm with __flush_hash_table_range This is only used with init_mm currently. 
Walking init_mm is much simpler because we don't need to handle concurrent page table updates like other mm contexts do. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-5-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 3 +-- arch/powerpc/kernel/pci_64.c | 2 +- arch/powerpc/mm/book3s64/hash_tlb.c | 16 +++------------- 3 files changed, 5 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h index 64d02a704bcb..3b95769739c7 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h @@ -113,8 +113,7 @@ static inline void hash__flush_tlb_kernel_range(unsigned long start, struct mmu_gather; extern void hash__tlb_flush(struct mmu_gather *tlb); /* Private function for use by PCI IO mapping code */ -extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, - unsigned long end); +extern void __flush_hash_table_range(unsigned long start, unsigned long end); extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr); #endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */ diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index f83d1f69b1dd..30d07fc79dd1 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -100,7 +100,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus) pci_name(bus->self)); #ifdef CONFIG_PPC_BOOK3S_64 - __flush_hash_table_range(&init_mm, res->start + _IO_BASE, + __flush_hash_table_range(res->start + _IO_BASE, res->end + _IO_BASE + 1); #endif return 0; diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c index 4a70d8dd39cd..1fa2173413b5 100644 --- a/arch/powerpc/mm/book3s64/hash_tlb.c +++ b/arch/powerpc/mm/book3s64/hash_tlb.c @@ -176,7 +176,6 @@ void hash__tlb_flush(struct mmu_gather *tlb) * from the hash table (and the TLB). But keeps * the linux PTEs intact. * - * @mm : mm_struct of the target address space (generally init_mm) * @start : starting address * @end : ending address (not included in the flush) * @@ -189,17 +188,14 @@ void hash__tlb_flush(struct mmu_gather *tlb) * Because of that usage pattern, it is implemented for small size rather * than speed. 
*/ -void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, - unsigned long end) +void __flush_hash_table_range(unsigned long start, unsigned long end) { - bool is_thp; int hugepage_shift; unsigned long flags; start = _ALIGN_DOWN(start, PAGE_SIZE); end = _ALIGN_UP(end, PAGE_SIZE); - BUG_ON(!mm->pgd); /* * Note: Normally, we should only ever use a batch within a @@ -212,21 +208,15 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, local_irq_save(flags); arch_enter_lazy_mmu_mode(); for (; start < end; start += PAGE_SIZE) { - pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp, - &hugepage_shift); + pte_t *ptep = find_init_mm_pte(start, &hugepage_shift); unsigned long pte; if (ptep == NULL) continue; pte = pte_val(*ptep); - if (is_thp) - trace_hugepage_invalidate(start, pte); if (!(pte & H_PAGE_HASHPTE)) continue; - if (unlikely(is_thp)) - hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte); - else - hpte_need_flush(mm, start, ptep, pte, hugepage_shift); + hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift); } arch_leave_lazy_mmu_mode(); local_irq_restore(flags); -- cgit v1.2.3-59-g8ed1b From 2f92447f9f96583112420aa3cfb400ded55f667e Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:12 +0530 Subject: powerpc/book3s64/hash: Use the pte_t address from the caller Don't fetch the pte value using lockless page table walk. Instead use the value from the caller. hash_preload is called with ptl lock held. So it is safe to use the pte_t address directly. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-6-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/hash_utils.c | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 525eac4ee2c2..3d727f73a8db 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1546,14 +1546,11 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea) } #endif -static void hash_preload(struct mm_struct *mm, unsigned long ea, +static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, bool is_exec, unsigned long trap) { - int hugepage_shift; unsigned long vsid; pgd_t *pgdir; - pte_t *ptep; - unsigned long flags; int rc, ssize, update_flags = 0; unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0); @@ -1575,30 +1572,18 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea, vsid = get_user_vsid(&mm->context, ea, ssize); if (!vsid) return; - /* - * Hash doesn't like irqs. Walking linux page table with irq disabled - * saves us from holding multiple locks. - */ - local_irq_save(flags); - /* - * THP pages use update_mmu_cache_pmd. We don't do - * hash preload there. Hence can ignore THP here - */ - ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift); - if (!ptep) - goto out_exit; - - WARN_ON(hugepage_shift); #ifdef CONFIG_PPC_64K_PAGES /* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on * a 64K kernel), then we don't preload, hash_page() will take * care of it once we actually try to access the page. * That way we don't have to duplicate all of the logic for segment * page size demotion here + * Called with PTL held, hence can be sure the value won't change in + * between. 
*/ if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep)) - goto out_exit; + return; #endif /* CONFIG_PPC_64K_PAGES */ /* Is that local to this CPU ? */ @@ -1623,8 +1608,6 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea, mm_ctx_user_psize(&mm->context), mm_ctx_user_psize(&mm->context), pte_val(*ptep)); -out_exit: - local_irq_restore(flags); } /* @@ -1675,7 +1658,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, return; } - hash_preload(vma->vm_mm, address, is_exec, trap); + hash_preload(vma->vm_mm, ptep, address, is_exec, trap); } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM -- cgit v1.2.3-59-g8ed1b From 0da81b658b5bf5c16abe51eb32f1905a32322f53 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:13 +0530 Subject: powerpc/mce: Don't reload pte val in addr_to_pfn A lockless page table walk should be safe against parallel THP collapse, THP split and madvise(MADV_DONTNEED)/parallel fault. This patch makes sure kernel won't reload the pteval when checking for different conditions. The patch also added a check for pte_present to make sure the kernel is indeed operating on a PTE and not a pointer to level 0 table page. The pfn value we find here can be different from the actual pfn on which machine check happened. This can happen if we raced with a parallel update of the page table. In such a scenario we end up isolating a wrong pfn. But that doesn't have any other side effect. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-7-aneesh.kumar@linux.ibm.com --- arch/powerpc/kernel/mce_power.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index 067b094bfeff..1d18991f3854 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -27,7 +27,7 @@ */ unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr) { - pte_t *ptep; + pte_t *ptep, pte; unsigned int shift; unsigned long pfn, flags; struct mm_struct *mm; @@ -39,19 +39,23 @@ unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr) local_irq_save(flags); ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift); + if (!ptep) { + pfn = ULONG_MAX; + goto out; + } + pte = READ_ONCE(*ptep); - if (!ptep || pte_special(*ptep)) { + if (!pte_present(pte) || pte_special(pte)) { pfn = ULONG_MAX; goto out; } if (shift <= PAGE_SHIFT) - pfn = pte_pfn(*ptep); + pfn = pte_pfn(pte); else { unsigned long rpnmask = (1ul << shift) - PAGE_SIZE; - pfn = pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask))); + pfn = pte_pfn(__pte(pte_val(pte) | (addr & rpnmask))); } - out: local_irq_restore(flags); return pfn; -- cgit v1.2.3-59-g8ed1b From 15759cb054efdd45e6db8433a829a5734e6d50f6 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:14 +0530 Subject: powerpc/perf/callchain: Use __get_user_pages_fast in read_user_stack_slow read_user_stack_slow is called with interrupts soft disabled and it copies contents from the page which we find mapped to a specific address. To convert userspace address to pfn, the kernel now uses lockless page table walk. The kernel needs to make sure the pfn value read remains stable and is not released and reused for another process while the contents are read from the page. This can only be achieved by holding a page reference. One of the first approaches I tried was to check the pte value after the kernel copies the contents from the page. 
But as shown below, we can still get it wrong:

 CPU0                            CPU1
 pte = READ_ONCE(*ptep);
                                 pte_clear(pte);
                                 put_page(page);
                                 page = alloc_page();
                                 memcpy(page_address(page),
                                        "secret password", nr);
 memcpy(buf, kaddr + offset, nb);
                                 put_page(page);
                                 handle_mm_fault()
                                 page = alloc_page();
                                 set_pte(pte, page);
 if (pte_val(pte) != pte_val(*ptep))

Hence switch to __get_user_pages_fast. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-8-aneesh.kumar@linux.ibm.com --- arch/powerpc/perf/callchain_64.c | 46 ++++++++++++---------------------------- 1 file changed, 14 insertions(+), 32 deletions(-) diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c index df1ffd8b20f2..b63086b663ef 100644 --- a/arch/powerpc/perf/callchain_64.c +++ b/arch/powerpc/perf/callchain_64.c @@ -26,43 +26,25 @@ */ int read_user_stack_slow(void __user *ptr, void *buf, int nb) { - int ret = -EFAULT; - pgd_t *pgdir; - pte_t *ptep, pte; - unsigned int shift; + unsigned long addr = (unsigned long) ptr; unsigned long offset; - unsigned long pfn, flags; + struct page *page; + int nrpages; void *kaddr; - pgdir = current->mm->pgd; - if (!pgdir) - return -EFAULT; + nrpages = __get_user_pages_fast(addr, 1, 1, &page); + if (nrpages == 1) { + kaddr = page_address(page); + + /* align address to page boundary */ + offset = addr & ~PAGE_MASK; - local_irq_save(flags); - ptep = find_current_mm_pte(pgdir, addr, NULL, &shift); - if (!ptep) - goto err_out; - if (!shift) - shift = PAGE_SHIFT; - - /* align address to page boundary */ - offset = addr & ((1UL << shift) - 1); - - pte = READ_ONCE(*ptep); - if (!pte_present(pte) || !pte_user(pte)) - goto err_out; - pfn = pte_pfn(pte); - if (!page_is_ram(pfn)) - goto err_out; - - /* no highmem to worry about here */ - kaddr = pfn_to_kaddr(pfn); - memcpy(buf, kaddr + offset, nb); - ret = 0; -err_out: - local_irq_restore(flags); - return ret; + memcpy(buf, kaddr + offset, nb); + put_page(page); + return 0; + } + return -EFAULT; } static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret) -- cgit v1.2.3-59-g8ed1b From 87013f9c602cfbbc0734fb2f703df9fc884d05d9 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:15 +0530 Subject: powerpc/kvm/book3s: switch from raw_spin_*lock to arch_spin_lock. These functions can get called in realmode. Hence use low level arch_spin_lock which is safe to be called in realmode. 
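As a sketch of the locking pattern adopted here (assuming the standard spinlock_t layout, where the architecture-level lock word is reachable as mmu_lock.rlock.raw_lock):

	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
	/* walk or update translations under the lock */
	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);

arch_spin_lock() manipulates only the hardware lock word, so it avoids the debug and tracing hooks that the raw_spin_*lock() wrappers can pull in, which may touch data that is not safe to access in realmode.
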
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-9-aneesh.kumar@linux.ibm.com --- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 220305454c23..03f8347de48b 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -948,7 +948,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu, return ret; /* Check if we've been invalidated */ - raw_spin_lock(&kvm->mmu_lock.rlock); + arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); if (mmu_notifier_retry(kvm, mmu_seq)) { ret = H_TOO_HARD; goto out_unlock; @@ -960,7 +960,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu, kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE); out_unlock: - raw_spin_unlock(&kvm->mmu_lock.rlock); + arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); return ret; } @@ -984,7 +984,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu, return ret; /* Check if we've been invalidated */ - raw_spin_lock(&kvm->mmu_lock.rlock); + arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); if (mmu_notifier_retry(kvm, mmu_seq)) { ret = H_TOO_HARD; goto out_unlock; @@ -996,7 +996,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu, kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE); out_unlock: - raw_spin_unlock(&kvm->mmu_lock.rlock); + arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); return ret; } -- cgit v1.2.3-59-g8ed1b From 4b99412ed6972cc77c1f16009e1d00323fcef9ab Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:16 +0530 Subject: powerpc/kvm/book3s: Add helper to walk partition scoped linux page table. The locking rules for walking the partition scoped table are different from those for the process scoped table. Hence add a helper for the secondary linux page table walk and also add a check whether we are holding the right locks. 
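As a usage sketch (mirroring the converted callers in the hunks below, which already run with the lock held):

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep)) {
		/* operate on the partition scoped mapping */
	}
	spin_unlock(&kvm->mmu_lock);

The helper itself only wraps __find_linux_pte() plus a VM_WARN that kvm->mmu_lock is held, so callers keep full responsibility for taking the lock.
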
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-10-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/kvm_book3s_64.h | 13 +++++++++++++ arch/powerpc/kvm/book3s_64_mmu_radix.c | 12 ++++++------ arch/powerpc/kvm/book3s_hv_nested.c | 2 +- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 04b2b927bb5a..2c2635967d6e 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -14,6 +14,7 @@ #include #include #include +#include #ifdef CONFIG_PPC_PSERIES static inline bool kvmhv_on_pseries(void) @@ -634,6 +635,18 @@ extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm, unsigned long gpa, unsigned long hpa, unsigned long nbytes); +static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea, + unsigned *hshift) +{ + pte_t *pte; + + VM_WARN(!spin_is_locked(&kvm->mmu_lock), + "%s called with kvm mmu_lock not held \n", __func__); + pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift); + + return pte; +} + #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index aa12cd4078b3..c92d413eeaaf 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -981,11 +981,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, return 0; } - ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); + ptep = find_kvm_secondary_pte(kvm, gpa, &shift); if (ptep && pte_present(*ptep)) kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, kvm->arch.lpid); - return 0; + return 0; } /* Called with kvm->mmu_lock held */ @@ -1001,7 +1001,7 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) return ref; - ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); + ptep = find_kvm_secondary_pte(kvm, gpa, &shift); if (ptep && pte_present(*ptep) && pte_young(*ptep)) { old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0, gpa, shift); @@ -1028,7 +1028,7 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) return ref; - ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); + ptep = find_kvm_secondary_pte(kvm, gpa, &shift); if (ptep && pte_present(*ptep) && pte_young(*ptep)) ref = 1; return ref; @@ -1048,7 +1048,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm, if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) return ret; - ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); + ptep = find_kvm_secondary_pte(kvm, gpa, &shift); if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) { ret = 1; if (shift) @@ -1109,7 +1109,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm, gpa = memslot->base_gfn << PAGE_SHIFT; spin_lock(&kvm->mmu_lock); for (n = memslot->npages; n; --n) { - ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); + ptep = find_kvm_secondary_pte(kvm, gpa, &shift); if (ptep && pte_present(*ptep)) kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, kvm->arch.lpid); diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index dc97e5be76f6..7f1fc5db13ea 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -1362,7 +1362,7 @@ static long 
int __kvmhv_nested_page_fault(struct kvm_run *run, /* See if can find translation in our partition scoped tables for L1 */ pte = __pte(0); spin_lock(&kvm->mmu_lock); - pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); + pte_p = find_kvm_secondary_pte(kvm, gpa, &shift); if (!shift) shift = PAGE_SHIFT; if (pte_p) -- cgit v1.2.3-59-g8ed1b From dc891849e030199d203334b2ddd2bd4fc5a87733 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:17 +0530 Subject: powerpc/kvm/nested: Add helper to walk nested shadow linux page table. The locking rules for walking the nested shadow linux page table are different from those for the process scoped table. Hence add a helper for the nested page table walk and also add a check whether we are holding the right locks. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-11-aneesh.kumar@linux.ibm.com --- arch/powerpc/kvm/book3s_hv_nested.c | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 7f1fc5db13ea..b2cc3eaec618 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -750,6 +750,24 @@ static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid) return kvm->arch.nested_guests[lpid]; } +static pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid, + unsigned long ea, unsigned *hshift) +{ + struct kvm_nested_guest *gp; + pte_t *pte; + + gp = kvmhv_find_nested(kvm, lpid); + if (!gp) + return NULL; + + VM_WARN(!spin_is_locked(&kvm->mmu_lock), + "%s called with kvm mmu_lock not held \n", __func__); + pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift); + + return pte; +} + + static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2) { return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK | @@ -792,19 +810,15 @@ static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap, unsigned long clr, unsigned long set, unsigned long hpa, unsigned long mask) { - struct kvm_nested_guest *gp; unsigned long gpa; unsigned int shift, lpid; pte_t *ptep; gpa = n_rmap & RMAP_NESTED_GPA_MASK; lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT; - gp = kvmhv_find_nested(kvm, lpid); - if (!gp) - return; /* Find the pte */ - ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift); + ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift); /* * If the pte is present and the pfn is still the same, update the pte. 
* If the pfn has changed then this is a stale rmap entry, the nested @@ -854,7 +868,7 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap, return; /* Find and invalidate the pte */ - ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift); + ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift); /* Don't spuriously invalidate ptes if the pfn has changed */ if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); @@ -921,7 +935,7 @@ static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu, int shift; spin_lock(&kvm->mmu_lock); - ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift); + ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift); if (!shift) shift = PAGE_SHIFT; if (ptep && pte_present(*ptep)) { -- cgit v1.2.3-59-g8ed1b From 6cdf30375f82fbc1d30252096440265426c0993c Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:18 +0530 Subject: powerpc/kvm/book3s: Use kvm helpers to walk shadow or secondary table update kvmppc_hv_handle_set_rc to use find_kvm_nested_guest_pte and find_kvm_secondary_pte Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-12-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/kvm_book3s.h | 2 +- arch/powerpc/include/asm/kvm_book3s_64.h | 3 +++ arch/powerpc/kvm/book3s_64_mmu_radix.c | 18 +++++++++--------- arch/powerpc/kvm/book3s_hv_nested.c | 13 ++++++------- 4 files changed, 19 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 506e4df2d730..37c8b50cb505 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -198,7 +198,7 @@ extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, unsigned int shift, const struct kvm_memory_slot *memslot, unsigned int lpid); -extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, +extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing, unsigned long gpa, unsigned int lpid); extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 2c2635967d6e..2860521992b6 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -647,6 +647,9 @@ static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea, return pte; } +extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid, + unsigned long ea, unsigned *hshift); + #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index c92d413eeaaf..70c4025406d8 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -735,7 +735,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, return ret; } -bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing, +bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing, unsigned long gpa, unsigned int lpid) { unsigned long pgflags; @@ -750,12 +750,12 @@ bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing, pgflags = _PAGE_ACCESSED; if (writing) pgflags |= _PAGE_DIRTY; - /* - * We are walking the secondary (partition-scoped) page table here. 
- * We can do this without disabling irq because the Linux MM - * subsystem doesn't do THP splits and collapses on this tree. - */ - ptep = __find_linux_pte(pgtable, gpa, NULL, &shift); + + if (nested) + ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift); + else + ptep = find_kvm_secondary_pte(kvm, gpa, &shift); + if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) { kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift); return true; @@ -949,8 +949,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, /* Failed to set the reference/change bits */ if (dsisr & DSISR_SET_RC) { spin_lock(&kvm->mmu_lock); - if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, - writing, gpa, kvm->arch.lpid)) + if (kvmppc_hv_handle_set_rc(kvm, false, writing, + gpa, kvm->arch.lpid)) dsisr &= ~DSISR_SET_RC; spin_unlock(&kvm->mmu_lock); diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index b2cc3eaec618..99011f1b772a 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -750,8 +750,8 @@ static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid) return kvm->arch.nested_guests[lpid]; } -static pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid, - unsigned long ea, unsigned *hshift) +pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid, + unsigned long ea, unsigned *hshift) { struct kvm_nested_guest *gp; pte_t *pte; @@ -767,7 +767,6 @@ static pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid, return pte; } - static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2) { return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK | @@ -1226,16 +1225,16 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu, spin_lock(&kvm->mmu_lock); /* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */ - ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing, - gpte.raddr, kvm->arch.lpid); + ret = kvmppc_hv_handle_set_rc(kvm, false, writing, + gpte.raddr, kvm->arch.lpid); if (!ret) { ret = -EINVAL; goto out_unlock; } /* Set the rc bit in the pte of the shadow_pgtable for the nest guest */ - ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa, - gp->shadow_lpid); + ret = kvmppc_hv_handle_set_rc(kvm, true, writing, + n_gpa, gp->shadow_lpid); if (!ret) ret = -EINVAL; else -- cgit v1.2.3-59-g8ed1b From 35528876a92917b60c20077121e5c6805936cd7d Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:19 +0530 Subject: powerpc/kvm/book3s: Add helper for host page table walk Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-13-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/kvm_book3s_64.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 2860521992b6..1ca1f6495012 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -647,6 +647,22 @@ static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea, return pte; } +static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq, + unsigned long ea, unsigned *hshift) +{ + pte_t *pte; + + VM_WARN(!spin_is_locked(&kvm->mmu_lock), + "%s called with kvm mmu_lock not held \n", __func__); + + if (mmu_notifier_retry(kvm, mmu_seq)) + return NULL; + + pte = 
__find_linux_pte(kvm->mm->pgd, ea, NULL, hshift); + + return pte; +} + extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid, unsigned long ea, unsigned *hshift); -- cgit v1.2.3-59-g8ed1b From 9781e759b3258bc607296bc1bf7cc314b7bfd0fc Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:20 +0530 Subject: powerpc/kvm/book3s: Use find_kvm_host_pte in page fault handler Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-14-aneesh.kumar@linux.ibm.com --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 2b35f9bcf892..8f9cd4c79044 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -602,12 +602,12 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, * Read the PTE from the process' radix tree and use that * so we get the shift and attribute bits. */ - local_irq_disable(); - ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift); + spin_lock(&kvm->mmu_lock); + ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); pte = __pte(0); if (ptep) - pte = *ptep; - local_irq_enable(); + pte = READ_ONCE(*ptep); + spin_unlock(&kvm->mmu_lock); /* * If the PTE disappeared temporarily due to a THP * collapse, just return and let the guest try again. -- cgit v1.2.3-59-g8ed1b From e3d8ed5518c7f50e24d2530b36d14b6c4284769f Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:21 +0530 Subject: powerpc/kvm/book3s: Use find_kvm_host_pte in h_enter Since kvmppc_do_h_enter can get called in realmode use low level arch_spin_lock which is safe to be called in realmode. 
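As a sketch of the converted lookup path in kvmppc_do_h_enter() (the error handling here is simplified relative to the hunks below):

	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift);
	if (ptep) {
		/* read and translate the host pte under the lock;
		 * NULL means no pte or a racing invalidation, as
		 * reported by mmu_notifier_retry() inside the helper.
		 */
	}
	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);

The interrupt disabling that previously protected the bare __find_linux_pte() walk is replaced by holding kvm->mmu_lock at arch level, which is also legal in realmode.
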
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-15-aneesh.kumar@linux.ibm.com --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 5 ++--- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 22 ++++++---------------- 2 files changed, 8 insertions(+), 19 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 8f9cd4c79044..18aed9775a3c 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -281,11 +281,10 @@ static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, { long ret; - /* Protect linux PTE lookup from page table destruction */ - rcu_read_lock_sched(); /* this disables preemption too */ + preempt_disable(); ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel, kvm->mm->pgd, false, pte_idx_ret); - rcu_read_unlock_sched(); + preempt_enable(); if (ret == H_TOO_HARD) { /* this can't happen */ pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n"); diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 03f8347de48b..83e987fecf97 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -210,7 +210,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, pte_t *ptep; unsigned int writing; unsigned long mmu_seq; - unsigned long rcbits, irq_flags = 0; + unsigned long rcbits; if (kvm_is_radix(kvm)) return H_FUNCTION; @@ -248,17 +248,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, /* Translate to host virtual address */ hva = __gfn_to_hva_memslot(memslot, gfn); - /* - * If we had a page table table change after lookup, we would - * retry via mmu_notifier_retry. - */ - if (!realmode) - local_irq_save(irq_flags); - /* - * If called in real mode we have MSR_EE = 0. Otherwise - * we disable irq above. - */ - ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift); + + arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); + ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift); if (ptep) { pte_t pte; unsigned int host_pte_size; @@ -272,8 +264,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, * to <= host page size, if host is using hugepage */ if (host_pte_size < psize) { - if (!realmode) - local_irq_restore(flags); + arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); return H_PARAMETER; } pte = kvmppc_read_update_linux_pte(ptep, writing); @@ -287,8 +278,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, pa |= gpa & ~PAGE_MASK; } } - if (!realmode) - local_irq_restore(irq_flags); + arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1); ptel |= pa; -- cgit v1.2.3-59-g8ed1b From 7769a3394bdf10fe2289ea1f5059aa750a6218c2 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:22 +0530 Subject: powerpc/kvm/book3s: use find_kvm_host_pte in put_tce functions Current code just holds the rmap lock to ensure a parallel page table update is prevented. That is not sufficient. The kernel should also check whether a mmu_notifier callback was running in parallel. 
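The guard added below follows the usual mmu notifier sequence pattern; as a sketch:

	/*
	 * Snapshot the notifier sequence before the walk;
	 * find_kvm_host_pte() rechecks it under kvm->mmu_lock and
	 * returns NULL if an invalidation ran in between, in which
	 * case the hcall backs off with H_TOO_HARD.
	 */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();	/* pairs with the write side in the mmu notifier */
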
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-16-aneesh.kumar@linux.ibm.com --- arch/powerpc/kvm/book3s_64_vio_hv.c | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 6fcaf1fa8e02..acc3ce570be7 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -437,8 +437,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, return H_SUCCESS; } -static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, - unsigned long ua, unsigned long *phpa) +static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq, + unsigned long ua, unsigned long *phpa) { pte_t *ptep, pte; unsigned shift = 0; @@ -452,10 +452,17 @@ static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, * to exit which will agains result in the below page table walk * to finish. */ - ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift); - if (!ptep || !pte_present(*ptep)) + /* an rmap lock won't make it safe. because that just ensure hash + * page table entries are removed with rmap lock held. After that + * mmu notifier returns and we go ahead and removing ptes from Qemu page table. + */ + ptep = find_kvm_host_pte(vcpu->kvm, mmu_seq, ua, &shift); + if (!ptep) + return -ENXIO; + + pte = READ_ONCE(*ptep); + if (!pte_present(pte)) return -ENXIO; - pte = *ptep; if (!shift) shift = PAGE_SHIFT; @@ -477,10 +484,12 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce_list, unsigned long npages) { + struct kvm *kvm = vcpu->kvm; struct kvmppc_spapr_tce_table *stt; long i, ret = H_SUCCESS; unsigned long tces, entry, ua = 0; unsigned long *rmap = NULL; + unsigned long mmu_seq; bool prereg = false; struct kvmppc_spapr_tce_iommu_table *stit; @@ -488,6 +497,12 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, if (kvm_is_radix(vcpu->kvm)) return H_TOO_HARD; + /* + * used to check for invalidations in progress + */ + mmu_seq = kvm->mmu_notifier_seq; + smp_rmb(); + stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD; @@ -547,7 +562,9 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, * real page. */ lock_rmap(rmap); - if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) { + + arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); + if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) { ret = H_TOO_HARD; goto unlock_exit; } @@ -593,6 +610,7 @@ unlock_exit: if (rmap) unlock_rmap(rmap); + arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); return ret; } -- cgit v1.2.3-59-g8ed1b From 3ff8df1430e9eaa18644345971342b09ba5348a3 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:23 +0530 Subject: powerpc/kvm/book3s: Avoid using rmap to protect parallel page table update. 
We now depend on kvm->mmu_lock Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-17-aneesh.kumar@linux.ibm.com --- arch/powerpc/kvm/book3s_64_vio_hv.c | 38 +++++++++---------------------------- 1 file changed, 9 insertions(+), 29 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index acc3ce570be7..167029e57c8f 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -74,8 +74,8 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm, EXPORT_SYMBOL_GPL(kvmppc_find_table); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE -static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce, - unsigned long *ua, unsigned long **prmap) +static long kvmppc_rm_tce_to_ua(struct kvm *kvm, + unsigned long tce, unsigned long *ua) { unsigned long gfn = tce >> PAGE_SHIFT; struct kvm_memory_slot *memslot; @@ -87,9 +87,6 @@ static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce, *ua = __gfn_to_hva_memslot(memslot, gfn) | (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); - if (prmap) - *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; - return 0; } @@ -116,7 +113,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt, if (iommu_tce_check_gpa(stt->page_shift, gpa)) return H_PARAMETER; - if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL)) + if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua)) return H_TOO_HARD; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { @@ -411,7 +408,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, return ret; dir = iommu_tce_direction(tce); - if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) + if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) return H_PARAMETER; entry = ioba >> stt->page_shift; @@ -488,7 +485,6 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, struct kvmppc_spapr_tce_table *stt; long i, ret = H_SUCCESS; unsigned long tces, entry, ua = 0; - unsigned long *rmap = NULL; unsigned long mmu_seq; bool prereg = false; struct kvmppc_spapr_tce_iommu_table *stit; @@ -530,7 +526,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, */ struct mm_iommu_table_group_mem_t *mem; - if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua)) return H_TOO_HARD; mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); @@ -546,23 +542,9 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, * We do not require memory to be preregistered in this case * so lock rmap and do __find_linux_pte_or_hugepte(). */ - if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap)) - return H_TOO_HARD; - - rmap = (void *) vmalloc_to_phys(rmap); - if (WARN_ON_ONCE_RM(!rmap)) + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua)) return H_TOO_HARD; - /* - * Synchronize with the MMU notifier callbacks in - * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.). - * While we have the rmap lock, code running on other CPUs - * cannot finish unmapping the host real page that backs - * this guest real page, so we are OK to access the host - * real page. 
- */ - lock_rmap(rmap); - arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) { ret = H_TOO_HARD; @@ -582,7 +564,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); ua = 0; - if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) { + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) { ret = H_PARAMETER; goto invalidate_exit; } @@ -607,10 +589,8 @@ invalidate_exit: iommu_tce_kill_rm(stit->tbl, entry, npages); unlock_exit: - if (rmap) - unlock_rmap(rmap); - - arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); + if (!prereg) + arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); return ret; } -- cgit v1.2.3-59-g8ed1b From bda3deaa6fc800218b6b59213e8da644e52787dc Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:24 +0530 Subject: powerpc/kvm/book3s: use find_kvm_host_pte in kvmppc_book3s_instantiate_page Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-18-aneesh.kumar@linux.ibm.com --- arch/powerpc/kvm/book3s_64_mmu_radix.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 70c4025406d8..271f1c3d8443 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -813,12 +813,12 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, * Read the PTE from the process' radix tree and use that * so we get the shift and attribute bits. */ - local_irq_disable(); - ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift); + spin_lock(&kvm->mmu_lock); + ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); pte = __pte(0); if (ptep) - pte = *ptep; - local_irq_enable(); + pte = READ_ONCE(*ptep); + spin_unlock(&kvm->mmu_lock); /* * If the PTE disappeared temporarily due to a THP * collapse, just return and let the guest try again. 
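For reference, the helper that all of these conversions funnel through, pieced together from the fragments quoted earlier in the series, is roughly:

	static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
					       unsigned long ea, unsigned *hshift)
	{
		pte_t *pte;

		/* The real helper also warns if kvm->mmu_lock is not held. */
		if (mmu_notifier_retry(kvm, mmu_seq))
			return NULL;

		pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);

		return pte;
	}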
-- cgit v1.2.3-59-g8ed1b From 9fd4236faa243f6660812b809bf2fb91d19c61b6 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:25 +0530 Subject: powerpc/kvm/book3s: Use find_kvm_host_pte in kvmppc_get_hpa Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-19-aneesh.kumar@linux.ibm.com --- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 32 +++++++++++--------------------- 1 file changed, 11 insertions(+), 21 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 83e987fecf97..3b168c69d503 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -878,8 +878,8 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, return ret; } -static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa, - int writing, unsigned long *hpa, +static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq, + unsigned long gpa, int writing, unsigned long *hpa, struct kvm_memory_slot **memslot_p) { struct kvm *kvm = vcpu->kvm; @@ -898,7 +898,7 @@ static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa, hva = __gfn_to_hva_memslot(memslot, gfn); /* Try to find the host pte for that virtual address */ - ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift); + ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); if (!ptep) return H_TOO_HARD; pte = kvmppc_read_update_linux_pte(ptep, writing); @@ -933,16 +933,11 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu, mmu_seq = kvm->mmu_notifier_seq; smp_rmb(); - ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot); - if (ret != H_SUCCESS) - return ret; - - /* Check if we've been invalidated */ arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); - if (mmu_notifier_retry(kvm, mmu_seq)) { - ret = H_TOO_HARD; + + ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot); + if (ret != H_SUCCESS) goto out_unlock; - } /* Zero the page */ for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES) @@ -966,19 +961,14 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu, mmu_seq = kvm->mmu_notifier_seq; smp_rmb(); - ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot); - if (ret != H_SUCCESS) - return ret; - ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL); + arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); + ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot); if (ret != H_SUCCESS) - return ret; + goto out_unlock; - /* Check if we've been invalidated */ - arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); - if (mmu_notifier_retry(kvm, mmu_seq)) { - ret = H_TOO_HARD; + ret = kvmppc_get_hpa(vcpu, mmu_seq, src, 0, &src_pa, NULL); + if (ret != H_SUCCESS) goto out_unlock; - } /* Copy the page */ memcpy((void *)dest_pa, (void *)src_pa, SZ_4K); -- cgit v1.2.3-59-g8ed1b From 0e11df9649ac4251c1bc5f27b7d89636d9270a91 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:26 +0530 Subject: powerpc/kvm/book3s: Use pte_present instead of opencoding _PAGE_PRESENT check This adds a _PAGE_PTE check and makes sure we validate the pte value returned via find_kvm_host_pte. NOTE: this also considers _PAGE_INVALID to be a software valid bit.
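On book3s/64, pte_present() is roughly the following (a sketch based on the generic book3s/64 definition, with an explanatory comment added):

	static inline int pte_present(pte_t pte)
	{
		/*
		 * A pte with _PAGE_INVALID set (e.g. by pmdp_invalidate())
		 * has _PAGE_PRESENT temporarily cleared but is still
		 * logically present, so both bits are checked.
		 */
		return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID));
	}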
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-20-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/kvm_book3s_64.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 1ca1f6495012..c58e64a0a74f 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -435,7 +435,7 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing) continue; } /* If pte is not present return None */ - if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT))) + if (unlikely(!pte_present(old_pte))) return __pte(0); new_pte = pte_mkyoung(old_pte); -- cgit v1.2.3-59-g8ed1b From e21dfbf01346ee4447d1533b1c57a003c773c6e3 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:27 +0530 Subject: powerpc/mm/book3s64: Avoid sending IPI on clearing PMD Now that all the lockless page table walks are careful w.r.t. the PTE address returned, we can revert commit: 13bd817bb884 ("powerpc/thp: Serialize pmd clear against a linux page table walk.") We also drop the equivalent IPI from other pte update routines. We still keep the IPI in hash pmdp collapse and that is to take care of parallel hash page table insert. The radix pmdp collapse flush can possibly be removed once I am sure generic code doesn't have any expectations around parallel gup walk. This speeds up Qemu guest RAM del/unplug time as below 128 core, 496GB guest: Without patch: munmap start: timer = 13162 ms, PID=7684 munmap finish: timer = 95312 ms, PID=7684 - delta = 82150 ms With patch: munmap start: timer = 196449 ms, PID=6681 munmap finish: timer = 196488 ms, PID=6681 - delta = 39ms Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-21-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/hash_pgtable.c | 11 ----------- arch/powerpc/mm/book3s64/pgtable.c | 8 -------- arch/powerpc/mm/book3s64/radix_pgtable.c | 19 +++++++------------ 3 files changed, 7 insertions(+), 31 deletions(-) diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c index 64733b9cb20a..64ca375278dc 100644 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c @@ -363,17 +363,6 @@ pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, * hash fault look at them. */ memset(pgtable, 0, PTE_FRAG_SIZE); - /* - * Serialize against find_current_mm_pte variants which does lock-less - * lookup in page tables with local interrupts disabled. For huge pages - * it casts pmd_t to pte_t. Since format of pte_t is different from - * pmd_t we want to prevent transit from pmd pointing to page table - * to pmd pointing to huge page (and back) while interrupts are disabled. - * We clear pmd to possibly replace it with page table pointer in - * different code paths. So make sure we wait for the parallel - * find_curren_mm_pte to finish.
- */ - serialize_against_pte_lookup(mm); return old_pmd; } diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index e0bb69c616e4..127325ead505 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -109,14 +109,6 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID); flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); - /* - * This ensures that generic code that rely on IRQ disabling - * to prevent a parallel THP split work as expected. - * - * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires - * a special case check in pmd_access_permitted. - */ - serialize_against_pte_lookup(vma->vm_mm); return __pmd(old_pmd); } diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 8f9edf07063a..dfb9fe92aea8 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -962,7 +962,13 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre pmd = *pmdp; pmd_clear(pmdp); - /*FIXME!! Verify whether we need this kick below */ + /* + * pmdp collapse_flush need to ensure that there are no parallel gup + * walk after this call. This is needed so that we can have stable + * page ref count when collapsing a page. We don't allow a collapse page + * if we have gup taken on the page. We can ensure that by sending IPI + * because gup walk happens with IRQ disabled. + */ serialize_against_pte_lookup(vma->vm_mm); radix__flush_tlb_collapsed_pmd(vma->vm_mm, address); @@ -1023,17 +1029,6 @@ pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm, old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); old_pmd = __pmd(old); - /* - * Serialize against find_current_mm_pte which does lock-less - * lookup in page tables with local interrupts disabled. For huge pages - * it casts pmd_t to pte_t. Since format of pte_t is different from - * pmd_t we want to prevent transit from pmd pointing to page table - * to pmd pointing to huge page (and back) while interrupts are disabled. - * We clear pmd to possibly replace it with page table pointer in - * different code paths. So make sure we wait for the parallel - * find_current_mm_pte to finish. - */ - serialize_against_pte_lookup(mm); return old_pmd; } -- cgit v1.2.3-59-g8ed1b From 75358ea359e7c0dfceb3c7b3d854570b4260cb7f Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:29 +0530 Subject: powerpc/mm/book3s64: Fix MADV_DONTNEED and parallel page fault race MADV_DONTNEED holds mmap_sem in read mode and that implies a parallel page fault is possible and the kernel can end up with a level 1 PTE entry (THP entry) converted to a level 0 PTE entry without flushing the THP TLB entry. Most architectures including POWER have issues with the kernel instantiating a level 0 PTE entry while holding level 1 TLB entries. The code sequence I am looking at is the following race between two threads. CPU 1 (madvise(MADV_DONTNEED)): down_read(mmap_sem); zap_pmd_range(); zap_huge_pmd(); pmd lock held; pmd cleared; table details added to mmu_gather; pmd_unlock(); ...; tlb_finish_mmu(). CPU 2 (page fault), in parallel: down_read(mmap_sem); then, after CPU 1 drops the pmd lock but before it reaches tlb_finish_mmu(), insert a level 0 PTE entry(). Fix this by forcing a tlb flush before releasing the pmd lock if this is not a fullmm invalidate. We can safely skip this invalidate for the task exit case (fullmm invalidate) because in that case we are sure there can be no parallel fault handlers.
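On the generic mm side, the hook added below is reached from zap_huge_pmd(), roughly as follows (a sketch; it assumes the accompanying generic change in this series that passes the vma down to the helper):

	/* mm/huge_memory.c, zap_huge_pmd(), roughly: */
	orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, tlb->fullmm);

tlb->fullmm is only set when the whole address space is being torn down, as in exit_mmap(), which is why the powerpc implementation below can skip the flush for that case.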
This does change the Qemu guest RAM del/unplug time as below 128 core, 496GB guest: Without patch: munmap start: timer = 196449 ms, PID=6681 munmap finish: timer = 196488 ms, PID=6681 - delta = 39ms With patch: munmap start: timer = 196345 ms, PID=6879 munmap finish: timer = 196714 ms, PID=6879 - delta = 369ms Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-23-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/pgtable.h | 5 +++++ arch/powerpc/mm/book3s64/pgtable.c | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 03521a8b0292..e1f551159f7d 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1265,6 +1265,11 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, } #define pmdp_collapse_flush pmdp_collapse_flush +#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL +pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, + unsigned long addr, + pmd_t *pmdp, int full); + #define __HAVE_ARCH_PGTABLE_DEPOSIT static inline void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable) diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 127325ead505..54b6d6d103ea 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -112,6 +112,24 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, return __pmd(old_pmd); } +pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp, int full) +{ + pmd_t pmd; + VM_BUG_ON(addr & ~HPAGE_PMD_MASK); + VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) && + !pmd_devmap(*pmdp)) || !pmd_present(*pmdp)); + pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); + /* + * if it not a fullmm flush, then we can possibly end up converting + * this PMD pte entry to a regular level 0 PTE by a parallel page fault. + * Make sure we flush the tlb in this case. + */ + if (!full) + flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE); + return pmd; +} + static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot) { return __pmd(pmd_val(pmd) | pgprot_val(pgprot)); -- cgit v1.2.3-59-g8ed1b From b1f9be9392f090f08e4ad9e2c68963aeff03bd67 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 20 Feb 2020 09:15:06 +0100 Subject: powerpc/xive: Enforce load-after-store ordering when StoreEOI is active MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When an interrupt has been handled, the OS notifies the interrupt controller with an EOI sequence. On a POWER9 system using the XIVE interrupt controller, this can be done with a load or a store operation on the ESB interrupt management page of the interrupt. The StoreEOI operation has less latency and improves interrupt handling performance but it was deactivated during the POWER9 DD2.0 timeframe because of ordering issues. We use the LoadEOI today but we plan to reactivate StoreEOI in future architectures. There is usually no need to enforce ordering between ESB load and store operations as they should lead to the same result. E.g. a store trigger and a load EOI can be executed in any order. Assuming the interrupt state is PQ=10, a store trigger followed by a load EOI will return a Q bit. In the reverse order, it will create a new interrupt trigger from HW.
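Expressed as raw ESB accesses, the two orders look roughly like this (an illustrative sketch; xd is the struct xive_irq_data for the source, and XIVE_ESB_LOAD_EOI is the 0x000 load offset):

	u64 val;

	/* Store trigger first, then load EOI: the load returns the Q bit. */
	out_be64(xd->trig_mmio, 0);
	val = in_be64(xd->eoi_mmio + XIVE_ESB_LOAD_EOI);

	/* Load EOI first, then store trigger: HW creates a new interrupt. */
	val = in_be64(xd->eoi_mmio + XIVE_ESB_LOAD_EOI);
	out_be64(xd->trig_mmio, 0);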
In both cases, the handler processing interrupts is notified. In some cases, the XIVE_ESB_SET_PQ_10 load operation is used to disable temporarily the interrupt source (mask/unmask). When the source is reenabled, the OS can detect if interrupts were received while the source was disabled and reinject them. This process needs special care when StoreEOI is activated. The ESB load and store operations should be correctly ordered because a XIVE_ESB_STORE_EOI operation could leave the source enabled if it has not completed before the loads. For those cases, we enforce Load-after-Store ordering with a special load operation offset. To avoid performance impact, this ordering is only enforced when really needed, that is when interrupt sources are temporarily disabled with the XIVE_ESB_SET_PQ_10 load. It should not be needed for other loads. Signed-off-by: Cédric Le Goater Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200220081506.31209-1-clg@kaod.org --- arch/powerpc/include/asm/xive-regs.h | 8 ++++++++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 5 +++++ arch/powerpc/kvm/book3s_xive_native.c | 6 ++++++ arch/powerpc/kvm/book3s_xive_template.c | 3 +++ arch/powerpc/sysdev/xive/common.c | 3 +++ 5 files changed, 25 insertions(+) diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h index 33aee7490cbb..8b211faa0e42 100644 --- a/arch/powerpc/include/asm/xive-regs.h +++ b/arch/powerpc/include/asm/xive-regs.h @@ -37,6 +37,14 @@ #define XIVE_ESB_SET_PQ_10 0xe00 /* Load */ #define XIVE_ESB_SET_PQ_11 0xf00 /* Load */ +/* + * Load-after-store ordering + * + * Adding this offset to the load address will enforce + * load-after-store ordering. This is required to use StoreEOI. + */ +#define XIVE_ESB_LD_ST_MO 0x40 /* Load-after-store ordering */ + #define XIVE_ESB_VAL_P 0x2 #define XIVE_ESB_VAL_Q 0x1 #define XIVE_ESB_INVALID 0xFF diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 780a499c7114..faae45b8cadf 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2907,6 +2907,11 @@ kvm_cede_exit: beq 4f li r0, 0 stb r0, VCPU_CEDED(r9) + /* + * The escalation interrupts are special as we don't EOI them. + * There is no need to use the load-after-store ordering offset + * to set PQ to 10 as we won't use StoreEOI. + */ li r6, XIVE_ESB_SET_PQ_10 b 5f 4: li r0, 1 diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 6ef0151ff70a..bdea91df1497 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -31,6 +31,12 @@ static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset) { u64 val; + /* + * The KVM XIVE native device does not use the XIVE_ESB_SET_PQ_10 + * load operation, so there is no need to enforce load-after-store + * ordering. 
+ */ + if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) offset |= offset << 4; diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index a8a900ace1e6..4ad3c0279458 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -58,6 +58,9 @@ static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset) { u64 val; + if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) + offset |= XIVE_ESB_LD_ST_MO; + if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) offset |= offset << 4; diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index b294f70f1a67..9603b2830d03 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -196,6 +196,9 @@ static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset) { u64 val; + if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) + offset |= XIVE_ESB_LD_ST_MO; + /* Handle HW errata */ if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) offset |= offset << 4; -- cgit v1.2.3-59-g8ed1b From e2a8b49e79553bd8ec48f73cead84e6146c09408 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 7 May 2020 22:33:24 +1000 Subject: powerpc/uaccess: Don't use "m<>" constraint The "m<>" constraint breaks compilation with GCC 4.6.x era compilers. The use of the constraint allows the compiler to use update-form instructions, however in practice current compilers never generate those forms for any of the current uses of __put_user_asm_goto(). We anticipate that GCC 4.6 will be declared unsupported for building the kernel in the not too distant future. So for now just switch to the "m" constraint. Fixes: 334710b1496a ("powerpc/uaccess: Implement unsafe_put_user() using 'asm goto'") Signed-off-by: Michael Ellerman Acked-by: Segher Boessenkool Link: https://lore.kernel.org/r/20200507123324.2250024-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/uaccess.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 62cc8d7640ec..164112007f54 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -210,7 +210,7 @@ do { \ "1: " op "%U1%X1 %0,%1 # put_user\n" \ EX_TABLE(1b, %l2) \ : \ - : "r" (x), "m<>" (*addr) \ + : "r" (x), "m" (*addr) \ : \ : label) -- cgit v1.2.3-59-g8ed1b From 2f62870ca5bc9d305f3c212192320c29e9dbdc54 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 2 May 2020 13:59:49 +0200 Subject: powerpc/powernv: Fix a warning message Fix a cut'n'paste error in a warning message. This should be 'cpu-idle-state-residency-ns' to match the property searched in the previous 'of_property_read_u32_array()' Fixes: 9c7b185ab2fe ("powernv/cpuidle: Parse dt idle properties into global structure") Signed-off-by: Christophe JAILLET Reviewed-by: Gautham R. 
Shenoy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200502115949.139000-1-christophe.jaillet@wanadoo.fr --- arch/powerpc/platforms/powernv/idle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 78599bca66c2..2dd467383a88 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -1270,7 +1270,7 @@ static int pnv_parse_cpuidle_dt(void) /* Read residencies */ if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns", temp_u32, nr_idle_states)) { - pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n"); + pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n"); rc = -EINVAL; goto out; } -- cgit v1.2.3-59-g8ed1b From bac7ca7b985b72873bd4ac2553b13b5af5b1f08a Mon Sep 17 00:00:00 2001 From: Andrey Abramov Date: Tue, 2 Apr 2019 23:47:22 +0300 Subject: powerpc: module_[32|64].c: replace swap function with built-in one Replace relaswap with built-in one, because relaswap does a simple byte to byte swap. Since Spectre mitigations have made indirect function calls more expensive, and the default simple byte copies swap is implemented without them, an "optimized" custom swap function is now a waste of time as well as code. Signed-off-by: Andrey Abramov Reviewed-by: George Spelvin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/994931554238042@iva8-b333b7f98ab0.qloud-c.yandex.net --- arch/powerpc/kernel/module_32.c | 17 +---------------- arch/powerpc/kernel/module_64.c | 17 +---------------- 2 files changed, 2 insertions(+), 32 deletions(-) diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index d7134c614c16..c27b8687b82a 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -67,21 +67,6 @@ static int relacmp(const void *_x, const void *_y) return 0; } -static void relaswap(void *_x, void *_y, int size) -{ - uint32_t *x, *y, tmp; - int i; - - y = (uint32_t *)_x; - x = (uint32_t *)_y; - - for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) { - tmp = x[i]; - x[i] = y[i]; - y[i] = tmp; - } -} - /* Get the potential trampolines size required of the init and non-init sections */ static unsigned long get_plt_size(const Elf32_Ehdr *hdr, @@ -118,7 +103,7 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr, */ sort((void *)hdr + sechdrs[i].sh_offset, sechdrs[i].sh_size / sizeof(Elf32_Rela), - sizeof(Elf32_Rela), relacmp, relaswap); + sizeof(Elf32_Rela), relacmp, NULL); ret += count_relocs((void *)hdr + sechdrs[i].sh_offset, diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 007606a48fd9..f808159f3dfd 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -226,21 +226,6 @@ static int relacmp(const void *_x, const void *_y) return 0; } -static void relaswap(void *_x, void *_y, int size) -{ - uint64_t *x, *y, tmp; - int i; - - y = (uint64_t *)_x; - x = (uint64_t *)_y; - - for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) { - tmp = x[i]; - x[i] = y[i]; - y[i] = tmp; - } -} - /* Get size of potential trampolines required. 
*/ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, const Elf64_Shdr *sechdrs) @@ -264,7 +249,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, */ sort((void *)sechdrs[i].sh_addr, sechdrs[i].sh_size / sizeof(Elf64_Rela), sizeof(Elf64_Rela), relacmp, NULL); relocs += count_relocs((void *)sechdrs[i].sh_addr, sechdrs[i].sh_size -- cgit v1.2.3-59-g8ed1b From ad0f522df1b2f4fe5d4ae6418e1ea216154a0662 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Sat, 2 May 2020 16:26:42 +0200 Subject: powerpc/5200: update contact email My 'pengutronix' address has been defunct for years. Merge the entries and use the proper contact address. Signed-off-by: Wolfram Sang Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200502142642.18979-1-wsa@kernel.org --- arch/powerpc/boot/dts/pcm032.dts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/powerpc/boot/dts/pcm032.dts b/arch/powerpc/boot/dts/pcm032.dts index c259c6b3ac5a..780e13d99e7b 100644 --- a/arch/powerpc/boot/dts/pcm032.dts +++ b/arch/powerpc/boot/dts/pcm032.dts @@ -3,9 +3,7 @@ * phyCORE-MPC5200B-IO (pcm032) board Device Tree Source * * Copyright (C) 2006-2009 Pengutronix - * Sascha Hauer - * Juergen Beisert - * Wolfram Sang + * Sascha Hauer, Juergen Beisert, Wolfram Sang */ /include/ "mpc5200b.dtsi" -- cgit v1.2.3-59-g8ed1b From 679d74abc4e14cb369e46f080d90f4dc8c143e65 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 6 May 2020 06:51:59 +0000 Subject: powerpc/8xx: Update email address in MAINTAINERS Since 01 May 2020, our email addresses have changed to @csgroup.eu Update MAINTAINERS accordingly. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/9fd0f9a827ebbeae64ad7a6f6c595d242f4dd5fc.1588747860.git.christophe.leroy@csgroup.eu --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index b816a453b10e..efb37e3fa623 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9786,7 +9786,7 @@ F: arch/powerpc/platforms/83xx/ F: arch/powerpc/platforms/85xx/ LINUX FOR POWERPC EMBEDDED PPC8XX -M: Christophe Leroy +M: Christophe Leroy L: linuxppc-dev@lists.ozlabs.org S: Maintained F: arch/powerpc/platforms/8xx/ -- cgit v1.2.3-59-g8ed1b From 7bfc3c84cbf5167d943cff9b3d2619dab0b7894c Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 20 Apr 2020 18:36:34 +0000 Subject: drivers/powerpc: Replace _ALIGN_UP() by ALIGN() _ALIGN_UP() is specific to powerpc. ALIGN() is generic and does the same. Replace _ALIGN_UP() by ALIGN(). Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Reviewed-by: Joel Stanley Link: https://lore.kernel.org/r/a5945463f86c984151962a475a3ee56a2893e85d.1587407777.git.christophe.leroy@c-s.fr --- drivers/ps3/ps3-lpm.c | 6 +++--- drivers/vfio/pci/vfio_pci_nvlink2.c | 2 +- drivers/video/fbdev/ps3fb.c | 4 ++-- sound/ppc/snd_ps3.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c index 83c45659bc9d..064b5884ba13 100644 --- a/drivers/ps3/ps3-lpm.c +++ b/drivers/ps3/ps3-lpm.c @@ -1096,8 +1096,8 @@ int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache, lpm_priv->tb_cache_internal = NULL; lpm_priv->tb_cache = NULL; } else if (tb_cache) { - if (tb_cache != (void *)_ALIGN_UP((unsigned long)tb_cache, 128) - || tb_cache_size != _ALIGN_UP(tb_cache_size, 128)) { + if (tb_cache != (void *)ALIGN((unsigned long)tb_cache, 128) + || tb_cache_size != ALIGN(tb_cache_size, 128)) {
dev_err(sbd_core(), "%s:%u: unaligned tb_cache\n", __func__, __LINE__); result = -EINVAL; @@ -1116,7 +1116,7 @@ int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache, result = -ENOMEM; goto fail_malloc; } - lpm_priv->tb_cache = (void *)_ALIGN_UP( + lpm_priv->tb_cache = (void *)ALIGN( (unsigned long)lpm_priv->tb_cache_internal, 128); } diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c index ed20d73cc27c..65c61710c0e9 100644 --- a/drivers/vfio/pci/vfio_pci_nvlink2.c +++ b/drivers/vfio/pci/vfio_pci_nvlink2.c @@ -67,7 +67,7 @@ static size_t vfio_pci_nvgpu_rw(struct vfio_pci_device *vdev, * * This is not fast path anyway. */ - sizealigned = _ALIGN_UP(posoff + count, PAGE_SIZE); + sizealigned = ALIGN(posoff + count, PAGE_SIZE); ptr = ioremap_cache(data->gpu_hpa + posaligned, sizealigned); if (!ptr) return -EFAULT; diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c index 834f63edf700..9df78fb77267 100644 --- a/drivers/video/fbdev/ps3fb.c +++ b/drivers/video/fbdev/ps3fb.c @@ -44,7 +44,7 @@ #define GPU_CMD_BUF_SIZE (2 * 1024 * 1024) #define GPU_FB_START (64 * 1024) #define GPU_IOIF (0x0d000000UL) -#define GPU_ALIGN_UP(x) _ALIGN_UP((x), 64) +#define GPU_ALIGN_UP(x) ALIGN((x), 64) #define GPU_MAX_LINE_LENGTH (65536 - 64) #define GPU_INTR_STATUS_VSYNC_0 0 /* vsync on head A */ @@ -1015,7 +1015,7 @@ static int ps3fb_probe(struct ps3_system_bus_device *dev) } #endif - max_ps3fb_size = _ALIGN_UP(GPU_IOIF, 256*1024*1024) - GPU_IOIF; + max_ps3fb_size = ALIGN(GPU_IOIF, 256*1024*1024) - GPU_IOIF; if (ps3fb_videomemory.size > max_ps3fb_size) { dev_info(&dev->core, "Limiting ps3fb mem size to %lu bytes\n", max_ps3fb_size); diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c index 6d2a33b8faa0..b8161a08f2ca 100644 --- a/sound/ppc/snd_ps3.c +++ b/sound/ppc/snd_ps3.c @@ -926,7 +926,7 @@ static int snd_ps3_driver_probe(struct ps3_system_bus_device *dev) PAGE_SHIFT, /* use system page size */ 0, /* dma type; not used */ NULL, - _ALIGN_UP(SND_PS3_DMA_REGION_SIZE, PAGE_SIZE)); + ALIGN(SND_PS3_DMA_REGION_SIZE, PAGE_SIZE)); dev->d_region->ioid = PS3_AUDIO_IOID; ret = ps3_dma_region_create(dev->d_region); -- cgit v1.2.3-59-g8ed1b From e96d904ede6756641563d27daa746875b478a6c8 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 20 Apr 2020 18:36:35 +0000 Subject: powerpc: Replace _ALIGN_DOWN() by ALIGN_DOWN() _ALIGN_DOWN() is specific to powerpc ALIGN_DOWN() is generic and does the same Replace _ALIGN_DOWN() by ALIGN_DOWN() Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Reviewed-by: Joel Stanley Link: https://lore.kernel.org/r/3911a86d6b5bfa7ad88cd7c82416fbe6bb47e793.1587407777.git.christophe.leroy@c-s.fr --- arch/powerpc/include/asm/book3s/32/pgtable.h | 2 +- arch/powerpc/include/asm/nohash/32/pgtable.h | 2 +- arch/powerpc/kernel/pci_64.c | 2 +- arch/powerpc/kernel/prom.c | 6 +++--- arch/powerpc/kernel/prom_init.c | 8 ++++---- arch/powerpc/mm/book3s64/hash_tlb.c | 4 ++-- arch/powerpc/mm/init_64.c | 4 ++-- arch/powerpc/platforms/powernv/opal-fadump.c | 2 +- arch/powerpc/platforms/powernv/pci-ioda.c | 2 +- arch/powerpc/platforms/ps3/mm.c | 14 +++++++------- arch/powerpc/platforms/pseries/rtas-fadump.c | 2 +- 11 files changed, 24 insertions(+), 24 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 7549393c4c43..53b5c93eaf5d 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -195,7 +195,7 @@ int 
map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); #endif #ifdef CONFIG_KASAN_VMALLOC -#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) +#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) #else #define VMALLOC_END ioremap_bot #endif diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index b04ba257fddb..5b4d4c4297e1 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -116,7 +116,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); #endif #ifdef CONFIG_KASAN_VMALLOC -#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) +#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) #else #define VMALLOC_END ioremap_bot #endif diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 30d07fc79dd1..89591fb31fb6 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -130,7 +130,7 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose) unsigned long size_page; unsigned long io_virt_offset; - phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE); + phys_page = ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE); size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE); /* Make sure IO area address is clear */ diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 6620f37abe73..10b5d5eafd34 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -96,7 +96,7 @@ static inline int overlaps_initrd(unsigned long start, unsigned long size) if (!initrd_start) return 0; - return (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) && + return (start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) && start <= _ALIGN_UP(initrd_end, PAGE_SIZE); #else return 0; @@ -623,9 +623,9 @@ static void __init early_reserve_mem(void) #ifdef CONFIG_BLK_DEV_INITRD /* Then reserve the initrd, if any */ if (initrd_start && (initrd_end > initrd_start)) { - memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE), + memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE), _ALIGN_UP(initrd_end, PAGE_SIZE) - - _ALIGN_DOWN(initrd_start, PAGE_SIZE)); + ALIGN_DOWN(initrd_start, PAGE_SIZE)); } #endif /* CONFIG_BLK_DEV_INITRD */ diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 806be751c336..4cf5958eebd4 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1500,7 +1500,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, if (highmem) { /* Carve out storage for the TCE table. */ - addr = _ALIGN_DOWN(alloc_top_high - size, align); + addr = ALIGN_DOWN(alloc_top_high - size, align); if (addr <= alloc_bottom) return 0; /* Will we bump into the RMO ? 
If yes, check out that we @@ -1518,9 +1518,9 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, goto bail; } - base = _ALIGN_DOWN(alloc_top - size, align); + base = ALIGN_DOWN(alloc_top - size, align); for (; base > alloc_bottom; - base = _ALIGN_DOWN(base - 0x100000, align)) { + base = ALIGN_DOWN(base - 0x100000, align)) { prom_debug(" trying: 0x%lx\n\r", base); addr = (unsigned long)prom_claim(base, size, 0); if (addr != PROM_ERROR && addr != 0) @@ -1586,7 +1586,7 @@ static void __init reserve_mem(u64 base, u64 size) * have our terminator with "size" set to 0 since we are * dumb and just copy this entire array to the boot params */ - base = _ALIGN_DOWN(base, PAGE_SIZE); + base = ALIGN_DOWN(base, PAGE_SIZE); top = _ALIGN_UP(top, PAGE_SIZE); size = top - base; diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c index 1fa2173413b5..a500979fbc59 100644 --- a/arch/powerpc/mm/book3s64/hash_tlb.c +++ b/arch/powerpc/mm/book3s64/hash_tlb.c @@ -193,7 +193,7 @@ void __flush_hash_table_range(unsigned long start, unsigned long end) int hugepage_shift; unsigned long flags; - start = _ALIGN_DOWN(start, PAGE_SIZE); + start = ALIGN_DOWN(start, PAGE_SIZE); end = _ALIGN_UP(end, PAGE_SIZE); @@ -228,7 +228,7 @@ void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr) pte_t *start_pte; unsigned long flags; - addr = _ALIGN_DOWN(addr, PMD_SIZE); + addr = ALIGN_DOWN(addr, PMD_SIZE); /* * Note: Normally, we should only ever use a batch within a * PTE locked section. This violates the rule, but will work diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 4002ced3596f..c7ce4ec5060e 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -203,7 +203,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; /* Align to the page size of the linear mapping. */ - start = _ALIGN_DOWN(start, page_size); + start = ALIGN_DOWN(start, page_size); pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node); @@ -292,7 +292,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, unsigned long alt_start = ~0, alt_end = ~0; unsigned long base_pfn; - start = _ALIGN_DOWN(start, page_size); + start = ALIGN_DOWN(start, page_size); if (altmap) { alt_start = altmap->base_pfn; alt_end = altmap->base_pfn + altmap->reserve + diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c index d361d37d975f..9a360ced663b 100644 --- a/arch/powerpc/platforms/powernv/opal-fadump.c +++ b/arch/powerpc/platforms/powernv/opal-fadump.c @@ -671,7 +671,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) * Firmware supports 32-bit field for size. Align it to PAGE_SIZE * and request firmware to copy multiple kernel boot memory regions. */ - fadump_conf->max_copy_size = _ALIGN_DOWN(U32_MAX, PAGE_SIZE); + fadump_conf->max_copy_size = ALIGN_DOWN(U32_MAX, PAGE_SIZE); /* * Check if dump has been initiated on last reboot. 
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 57d3a6af1d52..276b011cd45d 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -264,7 +264,7 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev, if (!r->parent || !pnv_pci_is_m64(phb, r)) continue; - start = _ALIGN_DOWN(r->start - base, sgsz); + start = ALIGN_DOWN(r->start - base, sgsz); end = _ALIGN_UP(r->end - base, sgsz); for (segno = start / sgsz; segno < end / sgsz; segno++) { if (pe_bitmap) diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index 423be34f0f5f..71ed37f7f475 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c @@ -263,7 +263,7 @@ static int ps3_mm_region_create(struct mem_region *r, unsigned long size) int result; u64 muid; - r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M); + r->size = ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M); DBG("%s:%d requested %lxh\n", __func__, __LINE__, size); DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size); @@ -394,7 +394,7 @@ static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r, unsigned long bus_addr, unsigned long len) { struct dma_chunk *c; - unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size); + unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus, 1 << r->page_size); @@ -423,7 +423,7 @@ static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r, unsigned long lpar_addr, unsigned long len) { struct dma_chunk *c; - unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size); + unsigned long aligned_lpar = ALIGN_DOWN(lpar_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar, 1 << r->page_size); @@ -775,7 +775,7 @@ static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr, struct dma_chunk *c; unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr) : virt_addr; - unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size); + unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys, 1 << r->page_size); *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr)); @@ -830,7 +830,7 @@ static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr, struct dma_chunk *c; unsigned long phys_addr = is_kernel_addr(virt_addr) ? 
__pa(virt_addr) : virt_addr; - unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size); + unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys, 1 << r->page_size); @@ -889,7 +889,7 @@ static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr, c = dma_find_chunk(r, bus_addr, len); if (!c) { - unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, + unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + bus_addr - aligned_bus, 1 << r->page_size); @@ -926,7 +926,7 @@ static int dma_ioc0_unmap_area(struct ps3_dma_region *r, c = dma_find_chunk(r, bus_addr, len); if (!c) { - unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, + unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + bus_addr - aligned_bus, diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c index 70c3013fdd07..81343908ed33 100644 --- a/arch/powerpc/platforms/pseries/rtas-fadump.c +++ b/arch/powerpc/platforms/pseries/rtas-fadump.c @@ -506,7 +506,7 @@ void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) fadump_conf->fadump_supported = 1; /* Firmware supports 64-bit value for size, align it to pagesize. */ - fadump_conf->max_copy_size = _ALIGN_DOWN(U64_MAX, PAGE_SIZE); + fadump_conf->max_copy_size = ALIGN_DOWN(U64_MAX, PAGE_SIZE); /* * The 'ibm,kernel-dump' rtas node is present only if there is -- cgit v1.2.3-59-g8ed1b From b711531641038f3ff3723914f3d5ba79848d347e Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 20 Apr 2020 18:36:36 +0000 Subject: powerpc: Replace _ALIGN_UP() by ALIGN() _ALIGN_UP() is specific to powerpc ALIGN() is generic and does the same Replace _ALIGN_UP() by ALIGN() Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Reviewed-by: Joel Stanley Link: https://lore.kernel.org/r/8a6d7e45f7904c73a0af539642d3962e2a3c7268.1587407777.git.christophe.leroy@c-s.fr --- arch/powerpc/include/asm/iommu.h | 4 ++-- arch/powerpc/kernel/head_booke.h | 2 +- arch/powerpc/kernel/nvram_64.c | 4 ++-- arch/powerpc/kernel/pci_64.c | 2 +- arch/powerpc/kernel/prom.c | 4 ++-- arch/powerpc/kernel/prom_init.c | 8 ++++---- arch/powerpc/kvm/book3s_64_vio_hv.c | 2 +- arch/powerpc/mm/book3s64/hash_tlb.c | 2 +- arch/powerpc/mm/book3s64/radix_pgtable.c | 2 +- arch/powerpc/mm/slice.c | 2 +- arch/powerpc/platforms/cell/iommu.c | 6 +++--- arch/powerpc/platforms/powermac/bootx_init.c | 10 +++++----- arch/powerpc/platforms/powernv/pci-ioda.c | 8 ++++---- arch/powerpc/platforms/ps3/mm.c | 16 ++++++++-------- arch/powerpc/platforms/ps3/setup.c | 2 +- 15 files changed, 37 insertions(+), 37 deletions(-) diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 350101e11ddb..5032f1593299 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -22,11 +22,11 @@ #define IOMMU_PAGE_SHIFT_4K 12 #define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K) #define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1)) -#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K) +#define IOMMU_PAGE_ALIGN_4K(addr) ALIGN(addr, IOMMU_PAGE_SIZE_4K) #define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift) #define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1)) -#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr)) +#define 
IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr)) /* Boot time flags */ extern int iommu_is_off; diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index bd2e5ed8dd50..18f87bf9e32b 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -534,7 +534,7 @@ struct exception_regs { }; /* ensure this structure is always sized to a multiple of the stack alignment */ -#define STACK_EXC_LVL_FRAME_SIZE _ALIGN_UP(sizeof (struct exception_regs), 16) +#define STACK_EXC_LVL_FRAME_SIZE ALIGN(sizeof (struct exception_regs), 16) #endif /* __ASSEMBLY__ */ #endif /* __HEAD_BOOKE_H__ */ diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index fb4f61096613..314780e8ef78 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -854,8 +854,8 @@ loff_t __init nvram_create_partition(const char *name, int sig, BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16); /* Convert sizes from bytes to blocks */ - req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN; - min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN; + req_size = ALIGN(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN; + min_size = ALIGN(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN; /* If no minimum size specified, make it the same as the * requested size diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 89591fb31fb6..6a932de18aa6 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -131,7 +131,7 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose) unsigned long io_virt_offset; phys_page = ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE); - size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE); + size_page = ALIGN(hose->pci_io_size, PAGE_SIZE); /* Make sure IO area address is clear */ hose->io_base_alloc = NULL; diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 10b5d5eafd34..1dcf0e214a22 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -97,7 +97,7 @@ static inline int overlaps_initrd(unsigned long start, unsigned long size) return 0; return (start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) && - start <= _ALIGN_UP(initrd_end, PAGE_SIZE); + start <= ALIGN(initrd_end, PAGE_SIZE); #else return 0; #endif @@ -624,7 +624,7 @@ static void __init early_reserve_mem(void) /* Then reserve the initrd, if any */ if (initrd_start && (initrd_end > initrd_start)) { memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE), - _ALIGN_UP(initrd_end, PAGE_SIZE) - + ALIGN(initrd_end, PAGE_SIZE) - ALIGN_DOWN(initrd_start, PAGE_SIZE)); } #endif /* CONFIG_BLK_DEV_INITRD */ diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 4cf5958eebd4..3a5a7db4564f 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1449,18 +1449,18 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) unsigned long addr = 0; if (align) - base = _ALIGN_UP(base, align); + base = ALIGN(base, align); prom_debug("%s(%lx, %lx)\n", __func__, size, align); if (ram_top == 0) prom_panic("alloc_up() called with mem not initialized\n"); if (align) - base = _ALIGN_UP(alloc_bottom, align); + base = ALIGN(alloc_bottom, align); else base = alloc_bottom; for(; (base + size) <= alloc_top; - base = _ALIGN_UP(base + 0x100000, align)) { + base = ALIGN(base + 0x100000, align)) { prom_debug(" trying: 0x%lx\n\r", base); addr = (unsigned long)prom_claim(base, size, 0); if (addr != 
PROM_ERROR && addr != 0) @@ -1587,7 +1587,7 @@ static void __init reserve_mem(u64 base, u64 size) * dumb and just copy this entire array to the boot params */ base = ALIGN_DOWN(base, PAGE_SIZE); - top = _ALIGN_UP(top, PAGE_SIZE); + top = ALIGN(top, PAGE_SIZE); size = top - base; if (cnt >= (MEM_RESERVE_MAP_SIZE - 1)) diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 167029e57c8f..ac6ac192b8bb 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -205,7 +205,7 @@ static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt, idx = (ioba >> stt->page_shift) - stt->offset; sttpage = idx / TCES_PER_PAGE; - sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) / + sttpages = ALIGN(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) / TCES_PER_PAGE; for (i = sttpage; i < sttpage + sttpages; ++i) if (!stt->pages[i]) diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c index a500979fbc59..0fbf3dc9f2c2 100644 --- a/arch/powerpc/mm/book3s64/hash_tlb.c +++ b/arch/powerpc/mm/book3s64/hash_tlb.c @@ -194,7 +194,7 @@ void __flush_hash_table_range(unsigned long start, unsigned long end) unsigned long flags; start = ALIGN_DOWN(start, PAGE_SIZE); - end = _ALIGN_UP(end, PAGE_SIZE); + end = ALIGN(end, PAGE_SIZE); /* diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index dfb9fe92aea8..408176086dd5 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -261,7 +261,7 @@ static int __meminit create_physical_mapping(unsigned long start, pgprot_t prot; int psize; - start = _ALIGN_UP(start, PAGE_SIZE); + start = ALIGN(start, PAGE_SIZE); for (addr = start; addr < end; addr += mapping_size) { unsigned long gap, previous_size; int rc; diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index dffe1a45b6ed..82b45b1cb973 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -478,7 +478,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, /* If hint, make sure it matches our alignment restrictions */ if (!fixed && addr) { - addr = _ALIGN_UP(addr, page_size); + addr = ALIGN(addr, page_size); slice_dbg(" aligned addr=%lx\n", addr); /* Ignore hint if it's too large or overlaps a VMA */ if (addr > high_limit - len || addr < mmap_min_addr || diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index ca9ffc1c8685..2124831cf57c 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -943,7 +943,7 @@ static int __init cell_iommu_fixed_mapping_init(void) fbase = max(fbase, dbase + dsize); } - fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT); + fbase = ALIGN(fbase, 1 << IO_SEGMENT_SHIFT); fsize = memblock_phys_mem_size(); if ((fbase + fsize) <= 0x800000000ul) @@ -963,8 +963,8 @@ static int __init cell_iommu_fixed_mapping_init(void) hend = hbase + htab_size_bytes; /* The window must start and end on a segment boundary */ - if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) || - (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) { + if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) || + (hend != ALIGN(hend, 1 << IO_SEGMENT_SHIFT))) { pr_debug("iommu: hash window not segment aligned\n"); return -1; } diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index af309ee99114..c3374a90952f 100644 --- 
a/arch/powerpc/platforms/powermac/bootx_init.c +++ b/arch/powerpc/platforms/powermac/bootx_init.c @@ -108,7 +108,7 @@ static void * __init bootx_early_getprop(unsigned long base, #define dt_push_token(token, mem) \ do { \ - *(mem) = _ALIGN_UP(*(mem),4); \ + *(mem) = ALIGN(*(mem),4); \ *((u32 *)*(mem)) = token; \ *(mem) += 4; \ } while(0) @@ -150,7 +150,7 @@ static void __init bootx_dt_add_prop(char *name, void *data, int size, /* push property content */ if (size && data) { memcpy((void *)*mem_end, data, size); - *mem_end = _ALIGN_UP(*mem_end + size, 4); + *mem_end = ALIGN(*mem_end + size, 4); } } @@ -303,7 +303,7 @@ static void __init bootx_scan_dt_build_struct(unsigned long base, *lp++ = *p; } *lp = 0; - *mem_end = _ALIGN_UP((unsigned long)lp + 1, 4); + *mem_end = ALIGN((unsigned long)lp + 1, 4); /* get and store all properties */ while (*ppp) { @@ -356,11 +356,11 @@ static unsigned long __init bootx_flatten_dt(unsigned long start) /* Start using memory after the big blob passed by BootX, get * some space for the header */ - mem_start = mem_end = _ALIGN_UP(((unsigned long)bi) + start, 4); + mem_start = mem_end = ALIGN(((unsigned long)bi) + start, 4); DBG("Boot params header at: %x\n", mem_start); hdr = (struct boot_param_header *)mem_start; mem_end += sizeof(struct boot_param_header); - rsvmap = (u64 *)(_ALIGN_UP(mem_end, 8)); + rsvmap = (u64 *)(ALIGN(mem_end, 8)); hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - mem_start; mem_end = ((unsigned long)rsvmap) + 8 * sizeof(u64); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 276b011cd45d..d1a16ebc31bb 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -265,7 +265,7 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev, continue; start = ALIGN_DOWN(r->start - base, sgsz); - end = _ALIGN_UP(r->end - base, sgsz); + end = ALIGN(r->end - base, sgsz); for (segno = start / sgsz; segno < end / sgsz; segno++) { if (pe_bitmap) set_bit(segno, pe_bitmap); @@ -361,7 +361,7 @@ static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all) return NULL; /* Allocate bitmap */ - size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long)); + size = ALIGN(phb->ioda.total_pe_num / 8, sizeof(unsigned long)); pe_alloc = kzalloc(size, GFP_KERNEL); if (!pe_alloc) { pr_warn("%s: Out of memory !\n", @@ -2537,7 +2537,7 @@ unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift, direct_table_size = 1UL << table_shift; for ( ; levels; --levels) { - bytes += _ALIGN_UP(tce_table_size, direct_table_size); + bytes += ALIGN(tce_table_size, direct_table_size); tce_table_size /= direct_table_size; tce_table_size <<= 3; @@ -3863,7 +3863,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, PNV_IODA1_DMA32_SEGSIZE; /* Allocate aux data & arrays. 
We don't have IO ports on PHB3 */ - size = _ALIGN_UP(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8, + size = ALIGN(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8, sizeof(unsigned long)); m64map_off = size; size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]); diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index 71ed37f7f475..b83f2c851b40 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c @@ -395,7 +395,7 @@ static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r, { struct dma_chunk *c; unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size); - unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus, + unsigned long aligned_len = ALIGN(len+bus_addr-aligned_bus, 1 << r->page_size); list_for_each_entry(c, &r->chunk_list.head, link) { @@ -424,7 +424,7 @@ static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r, { struct dma_chunk *c; unsigned long aligned_lpar = ALIGN_DOWN(lpar_addr, 1 << r->page_size); - unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar, + unsigned long aligned_len = ALIGN(len + lpar_addr - aligned_lpar, 1 << r->page_size); list_for_each_entry(c, &r->chunk_list.head, link) { @@ -776,7 +776,7 @@ static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr, unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr) : virt_addr; unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size); - unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys, + unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys, 1 << r->page_size); *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr)); @@ -831,7 +831,7 @@ static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr, unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr) : virt_addr; unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size); - unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys, + unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys, 1 << r->page_size); DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__, @@ -891,7 +891,7 @@ static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr, if (!c) { unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size); - unsigned long aligned_len = _ALIGN_UP(len + bus_addr + unsigned long aligned_len = ALIGN(len + bus_addr - aligned_bus, 1 << r->page_size); DBG("%s:%d: not found: bus_addr %llxh\n", __func__, __LINE__, bus_addr); @@ -928,7 +928,7 @@ static int dma_ioc0_unmap_area(struct ps3_dma_region *r, if (!c) { unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size); - unsigned long aligned_len = _ALIGN_UP(len + bus_addr + unsigned long aligned_len = ALIGN(len + bus_addr - aligned_bus, 1 << r->page_size); DBG("%s:%d: not found: bus_addr %llxh\n", @@ -974,7 +974,7 @@ static int dma_sb_region_create_linear(struct ps3_dma_region *r) pr_info("%s:%d: forcing 16M pages for linear map\n", __func__, __LINE__); r->page_size = PS3_DMA_16M; - r->len = _ALIGN_UP(r->len, 1 << r->page_size); + r->len = ALIGN(r->len, 1 << r->page_size); } } @@ -1125,7 +1125,7 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev, r->offset = lpar_addr; if (r->offset >= map.rm.size) r->offset -= map.r1.offset; - r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size); + r->len = len ? 
len : ALIGN(map.total, 1 << r->page_size); switch (dev->dev_type) { case PS3_DEVICE_TYPE_SB: diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index b29368931c56..e9ae5dd03593 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -138,7 +138,7 @@ static int __init early_parse_ps3fb(char *p) if (!p) return 1; - ps3fb_videomemory.size = _ALIGN_UP(memparse(p, &p), + ps3fb_videomemory.size = ALIGN(memparse(p, &p), ps3fb_videomemory.align); return 0; } -- cgit v1.2.3-59-g8ed1b From d3f3d3bf76cfb04e73436a15e3987d3573e7523a Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 20 Apr 2020 18:36:37 +0000 Subject: powerpc: Replace _ALIGN() by ALIGN() _ALIGN() is specific to powerpc ALIGN() is generic and does the same Replace _ALIGN() by ALIGN() Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Reviewed-by: Joel Stanley Link: https://lore.kernel.org/r/4006d9c8e69f8eaccee954899f6b5fb76240d00b.1587407777.git.christophe.leroy@c-s.fr --- arch/powerpc/include/asm/book3s/32/pgtable.h | 2 +- arch/powerpc/include/asm/nohash/32/pgtable.h | 2 +- arch/powerpc/kernel/prom_init.c | 8 ++++---- arch/powerpc/platforms/powermac/bootx_init.c | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 53b5c93eaf5d..0d4bccb4b9f2 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -188,7 +188,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); * memory shall not share segments. */ #if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_MODULES) -#define VMALLOC_START ((_ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \ +#define VMALLOC_START ((ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \ ~(VMALLOC_OFFSET - 1)) #else #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 5b4d4c4297e1..4315d40906a0 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -110,7 +110,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); */ #define VMALLOC_OFFSET (0x1000000) /* 16M */ #ifdef PPC_PIN_SIZE -#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) +#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) #else #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) #endif diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 3a5a7db4564f..e3a9fde51c4f 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2426,7 +2426,7 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, { void *ret; - *mem_start = _ALIGN(*mem_start, align); + *mem_start = ALIGN(*mem_start, align); while ((*mem_start + needed) > *mem_end) { unsigned long room, chunk; @@ -2562,7 +2562,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, *lp++ = *p; } *lp = 0; - *mem_start = _ALIGN((unsigned long)lp + 1, 4); + *mem_start = ALIGN((unsigned long)lp + 1, 4); } /* get it again for debugging */ @@ -2608,7 +2608,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, /* push 
property content */ valp = make_room(mem_start, mem_end, l, 4); call_prom("getprop", 4, 1, node, pname, valp, l); - *mem_start = _ALIGN(*mem_start, 4); + *mem_start = ALIGN(*mem_start, 4); if (!prom_strcmp(pname, "phandle")) has_phandle = 1; @@ -2667,7 +2667,7 @@ static void __init flatten_device_tree(void) prom_panic ("couldn't get device tree root\n"); /* Build header and make room for mem rsv map */ - mem_start = _ALIGN(mem_start, 4); + mem_start = ALIGN(mem_start, 4); hdr = make_room(&mem_start, &mem_end, sizeof(struct boot_param_header), 4); dt_header_start = (unsigned long)hdr; diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index c3374a90952f..9d4ecd292255 100644 --- a/arch/powerpc/platforms/powermac/bootx_init.c +++ b/arch/powerpc/platforms/powermac/bootx_init.c @@ -386,7 +386,7 @@ static unsigned long __init bootx_flatten_dt(unsigned long start) hdr->dt_strings_size = bootx_dt_strend - bootx_dt_strbase; /* Build structure */ - mem_end = _ALIGN(mem_end, 16); + mem_end = ALIGN(mem_end, 16); DBG("Building device tree structure at: %x\n", mem_end); hdr->off_dt_struct = mem_end - mem_start; bootx_scan_dt_build_struct(base, 4, &mem_end); @@ -404,7 +404,7 @@ static unsigned long __init bootx_flatten_dt(unsigned long start) * also bump mem_reserve_cnt to cause further reservations to * fail since it's too late. */ - mem_end = _ALIGN(mem_end, PAGE_SIZE); + mem_end = ALIGN(mem_end, PAGE_SIZE); DBG("End of boot params: %x\n", mem_end); rsvmap[0] = mem_start; rsvmap[1] = mem_end; -- cgit v1.2.3-59-g8ed1b From 4cdb2da654033d76e1b1cb4ac427d9193dce816b Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 20 Apr 2020 18:36:38 +0000 Subject: powerpc: Remove _ALIGN_UP(), _ALIGN_DOWN() and _ALIGN() These three powerpc macros have been replaced by equivalent generic macros and are not used anymore. Remove them. Signed-off-by: Christophe Leroy Reviewed-by: Joel Stanley Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/bb0a6081f7b95ee64ca20f92483e5b9661cbacb2.1587407777.git.christophe.leroy@c-s.fr --- arch/powerpc/include/asm/page.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 3ee8df0f66e0..a63fe6f3a0ff 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -249,13 +249,6 @@ static inline bool pfn_valid(unsigned long pfn) #include #endif -/* align addr on a size boundary - adjust address up/down if needed */ -#define _ALIGN_UP(addr, size) __ALIGN_KERNEL(addr, size) -#define _ALIGN_DOWN(addr, size) ((addr)&(~((typeof(addr))(size)-1))) - -/* align addr on a size boundary - adjust address up if needed */ -#define _ALIGN(addr,size) _ALIGN_UP(addr,size) - /* * Don't compare things with KERNELBASE or PAGE_OFFSET to test for * "kernelness", use is_kernel_addr() - it should do what you want. -- cgit v1.2.3-59-g8ed1b From edbadaf0671072298e506074128b64e003c5812c Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 8 Apr 2020 15:58:49 +0000 Subject: powerpc/kasan: Fix stack overflow by increasing THREAD_SHIFT When CONFIG_KASAN is selected, the stack usage is increased. In the same way as x86 and arm64 architectures, increase THREAD_SHIFT when CONFIG_KASAN is selected. 
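For scale, THREAD_SHIFT is the log2 of the per-thread kernel stack size, so each step in the Kconfig range doubles the stack. A minimal standalone sketch of the mapping (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* The Kconfig range for THREAD_SHIFT is 13..15. */
	for (int shift = 13; shift <= 15; shift++)
		printf("THREAD_SHIFT=%d -> THREAD_SIZE=%lu KiB\n",
		       shift, (1UL << shift) / 1024);
	return 0;
}

On 32-bit, where this KASAN support was added, the new default bumps stacks from 8 KiB (shift 13) to 16 KiB (shift 14), absorbing the larger frames the instrumentation produces.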
Fixes: 2edb16efc899 ("powerpc/32: Add KASAN support") Reported-by: Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://bugzilla.kernel.org/show_bug.cgi?id=207129 Link: https://lore.kernel.org/r/2c50f3b1c9bbaa4217c9a98f3044bd2a36c46a4f.1586361277.git.christophe.leroy@c-s.fr --- arch/powerpc/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 8324d98728db..30e2111ca15d 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -772,6 +772,7 @@ config THREAD_SHIFT range 13 15 default "15" if PPC_256K_PAGES default "14" if PPC64 + default "14" if KASAN default "13" help Used to define the stack size. The default is almost always what you -- cgit v1.2.3-59-g8ed1b From feb9df3462e688d073848d85c8bb132fe8fd9ae5 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 7 May 2020 22:13:29 +1000 Subject: powerpc/64s: Always has full regs, so remove remnant checks Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200507121332.2233629-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/ptrace.h | 23 ++++++++++++++++------- arch/powerpc/kernel/process.c | 2 +- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index e0195e6b892b..89f31d5a8062 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -179,6 +179,20 @@ extern int ptrace_put_reg(struct task_struct *task, int regno, #define current_pt_regs() \ ((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1) + +#ifdef __powerpc64__ +#ifdef CONFIG_PPC_BOOK3S +#define TRAP(regs) ((regs)->trap) +#define FULL_REGS(regs) true +#define SET_FULL_REGS(regs) do { } while (0) +#else +#define TRAP(regs) ((regs)->trap & ~0x1) +#define FULL_REGS(regs) (((regs)->trap & 1) == 0) +#define SET_FULL_REGS(regs) ((regs)->trap |= 1) +#endif +#define CHECK_FULL_REGS(regs) BUG_ON(!FULL_REGS(regs)) +#define NV_REG_POISON 0xdeadbeefdeadbeefUL +#else /* * We use the least-significant bit of the trap field to indicate * whether we have saved the full set of registers, or only a @@ -186,17 +200,12 @@ extern int ptrace_put_reg(struct task_struct *task, int regno, * On 4xx we use the next bit to indicate whether the exception * is a critical exception (1 means it is). */ +#define TRAP(regs) ((regs)->trap & ~0xF) #define FULL_REGS(regs) (((regs)->trap & 1) == 0) -#ifndef __powerpc64__ +#define SET_FULL_REGS(regs) ((regs)->trap |= 1) #define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0) #define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0) #define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0) -#endif /* ! __powerpc64__ */ -#define TRAP(regs) ((regs)->trap & ~0xF) -#ifdef __powerpc64__ -#define NV_REG_POISON 0xdeadbeefdeadbeefUL -#define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1) -#else #define NV_REG_POISON 0xdeadbeef #define CHECK_FULL_REGS(regs) \ do { \ diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8479c762aef2..8af3583546b7 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1720,7 +1720,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) * FULL_REGS(regs) return true. This is necessary to allow * ptrace to examine the thread immediately after exec. 
*/ - regs->trap &= ~1UL; + SET_FULL_REGS(regs); #ifdef CONFIG_PPC32 regs->mq = 0; -- cgit v1.2.3-59-g8ed1b From db30144b5c9cfb09c6b8b2fa7a9c351c94aa3433 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 7 May 2020 22:13:30 +1000 Subject: powerpc: Use set_trap() and avoid open-coding trap masking The pt_regs.trap field keeps 4 low bits for some metadata about the trap or how it was handled, which is masked off in order to test the architectural trap number. Add a set_trap() accessor to set this, equivalent to TRAP() for returning it. This is actually not quite the equivalent of TRAP() because it always clears the low bits, which may be harmless if it can only be updated via ptrace syscall, but it seems dangerous. In fact setting TRAP from ptrace doesn't seem like a great idea so maybe it's better deleted. Signed-off-by: Nicholas Piggin [mpe: Make it a static inline rather than a shouty macro] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200507121332.2233629-2-mpe@ellerman.id.au --- arch/powerpc/include/asm/ptrace.h | 8 ++++++++ arch/powerpc/kernel/ptrace/ptrace-tm.c | 2 +- arch/powerpc/kernel/ptrace/ptrace-view.c | 2 +- arch/powerpc/xmon/xmon.c | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 89f31d5a8062..7c585bddc06e 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -182,10 +182,12 @@ extern int ptrace_put_reg(struct task_struct *task, int regno, #ifdef __powerpc64__ #ifdef CONFIG_PPC_BOOK3S +#define TRAP_FLAGS_MASK 0 #define TRAP(regs) ((regs)->trap) #define FULL_REGS(regs) true #define SET_FULL_REGS(regs) do { } while (0) #else +#define TRAP_FLAGS_MASK 0x1 #define TRAP(regs) ((regs)->trap & ~0x1) #define FULL_REGS(regs) (((regs)->trap & 1) == 0) #define SET_FULL_REGS(regs) ((regs)->trap |= 1) @@ -200,6 +202,7 @@ extern int ptrace_put_reg(struct task_struct *task, int regno, * On 4xx we use the next bit to indicate whether the exception * is a critical exception (1 means it is).
*/ +#define TRAP_FLAGS_MASK 0xF #define TRAP(regs) ((regs)->trap & ~0xF) #define FULL_REGS(regs) (((regs)->trap & 1) == 0) #define SET_FULL_REGS(regs) ((regs)->trap |= 1) @@ -214,6 +217,11 @@ do { \ } while (0) #endif /* __powerpc64__ */ +static inline void set_trap(struct pt_regs *regs, unsigned long val) +{ + regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK); +} + #define arch_has_single_step() (1) #ifndef CONFIG_BOOK3S_601 #define arch_has_block_step() (true) diff --git a/arch/powerpc/kernel/ptrace/ptrace-tm.c b/arch/powerpc/kernel/ptrace/ptrace-tm.c index d75aff31f637..32d62c606681 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-tm.c +++ b/arch/powerpc/kernel/ptrace/ptrace-tm.c @@ -43,7 +43,7 @@ static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr) static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap) { - task->thread.ckpt_regs.trap = trap & 0xfff0; + set_trap(&task->thread.ckpt_regs, trap); return 0; } diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c index 15e3b79b6395..caeb5822a8f4 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-view.c +++ b/arch/powerpc/kernel/ptrace/ptrace-view.c @@ -149,7 +149,7 @@ static int set_user_dscr(struct task_struct *task, unsigned long dscr) */ static int set_user_trap(struct task_struct *task, unsigned long trap) { - task->thread.regs->trap = trap & 0xfff0; + set_trap(task->thread.regs, trap); return 0; } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 7af840c0fc93..92761e47fb5c 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1178,7 +1178,7 @@ static int do_step(struct pt_regs *regs) return 0; } if (stepped > 0) { - regs->trap = 0xd00 | (regs->trap & 1); + set_trap(regs, 0xd00); printf("stepped to "); xmon_print_symbol(regs->nip, " ", "\n"); ppc_inst_dump(regs->nip, 1, 0); -- cgit v1.2.3-59-g8ed1b From 912237ea166428edcbf3c137adf12cb987c477f2 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 7 May 2020 22:13:31 +1000 Subject: powerpc: trap_is_syscall() helper to hide syscall trap number A new system call interrupt will be added with a new trap number. Hide the explicit 0xc00 test behind an accessor to reduce churn in callers. 
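The payoff comes when the new trap number lands: only the accessor changes, not every 0xc00 comparison site. A standalone sketch of the pattern (simplified pt_regs and a 32-bit style flags mask; illustrative, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

struct pt_regs { unsigned long trap; };

#define TRAP_FLAGS_MASK	0xf	/* low metadata bits */
#define TRAP(regs)	((regs)->trap & ~TRAP_FLAGS_MASK)

static bool trap_is_syscall(struct pt_regs *regs)
{
	/* A future syscall trap number gets added here, once, for all callers. */
	return TRAP(regs) == 0xc00;
}

int main(void)
{
	struct pt_regs regs = { .trap = 0xc01 };	/* syscall plus a flag bit */

	printf("syscall? %d\n", trap_is_syscall(&regs));	/* prints 1 */
	return 0;
}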
Signed-off-by: Nicholas Piggin [mpe: Make it a static inline] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200507121332.2233629-3-mpe@ellerman.id.au --- arch/powerpc/include/asm/ptrace.h | 5 +++++ arch/powerpc/include/asm/syscall.h | 5 ++++- arch/powerpc/kernel/process.c | 2 +- arch/powerpc/kernel/signal.c | 2 +- arch/powerpc/xmon/xmon.c | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 7c585bddc06e..5db45790a087 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -222,6 +222,11 @@ static inline void set_trap(struct pt_regs *regs, unsigned long val) regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK); } +static inline bool trap_is_syscall(struct pt_regs *regs) +{ + return TRAP(regs) == 0xc00; +} + #define arch_has_single_step() (1) #ifndef CONFIG_BOOK3S_601 #define arch_has_block_step() (true) diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index 38d62acfdce7..fd1b518eed17 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -26,7 +26,10 @@ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) * This is important for seccomp so that compat tasks can set r0 = -1 * to reject the syscall. */ - return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1; + if (trap_is_syscall(regs)) + return regs->gpr[0]; + else + return -1; } static inline void syscall_rollback(struct task_struct *task, diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8af3583546b7..db766252238f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1413,7 +1413,7 @@ void show_regs(struct pt_regs * regs) print_msr_bits(regs->msr); pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); trap = TRAP(regs); - if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) + if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR)) pr_cont("CFAR: "REG" ", regs->orig_gpr3); if (trap == 0x200 || trap == 0x300 || trap == 0x600) #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index a264989626fd..f2be9e960c2e 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -198,7 +198,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, int restart = 1; /* syscall ? */ - if (TRAP(regs) != 0x0C00) + if (!trap_is_syscall(regs)) return; /* error signalled ? */ diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 92761e47fb5c..a7430632bab4 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1776,7 +1776,7 @@ static void prregs(struct pt_regs *fp) #endif printf("pc = "); xmon_print_symbol(fp->nip, " ", "\n"); - if (TRAP(fp) != 0xc00 && cpu_has_feature(CPU_FTR_CFAR)) { + if (!trap_is_syscall(fp) && cpu_has_feature(CPU_FTR_CFAR)) { printf("cfar= "); xmon_print_symbol(fp->orig_gpr3, " ", "\n"); } -- cgit v1.2.3-59-g8ed1b From 4e0e45b07d790253643ee05300784ab2156e2d5e Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 7 May 2020 22:13:32 +1000 Subject: powerpc: Use trap metadata to prevent double restart rather than zeroing trap It's not very nice to zero trap for this, because then system calls no longer have trap_is_syscall(regs) invariant, and we can't distinguish between sc and scv system calls (in a later patch). 
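To make the lost invariant concrete before the fix, a minimal standalone sketch, using the no-restart bit (0x10) and a flags mask of the kind the patch introduces below (values illustrative):

#include <assert.h>
#include <stdbool.h>

struct pt_regs { unsigned long trap; };

#define TRAP_FLAGS_MASK	0x1f
#define TRAP(regs)	((regs)->trap & ~TRAP_FLAGS_MASK)

static bool trap_is_syscall(struct pt_regs *regs) { return TRAP(regs) == 0xc00; }
static void set_trap_norestart(struct pt_regs *regs) { regs->trap |= 0x10; }
static bool trap_norestart(struct pt_regs *regs) { return regs->trap & 0x10; }

int main(void)
{
	struct pt_regs regs = { .trap = 0xc00 };

	set_trap_norestart(&regs);
	/* The old code did regs.trap = 0 here, destroying both facts below. */
	assert(trap_is_syscall(&regs));	/* still recognisably a syscall */
	assert(trap_norestart(&regs));	/* but a further restart is suppressed */
	return 0;
}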
Take one last unused bit from the low bits of the pt_regs.trap word for this instead. There is not a really good reason why it should be in trap as opposed to another field, but trap has some concept of flags and it exists. Ideally I think we would move trap to 2-byte field and have 2 more bytes available independently. Add a selftests case for this, which can be seen to fail if trap_norestart() is changed to return false. Signed-off-by: Nicholas Piggin [mpe: Make them static inlines] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200507121332.2233629-4-mpe@ellerman.id.au --- arch/powerpc/include/asm/ptrace.h | 22 ++- arch/powerpc/kernel/signal.c | 7 +- arch/powerpc/kernel/signal_32.c | 2 +- arch/powerpc/kernel/signal_64.c | 10 +- tools/testing/selftests/powerpc/signal/Makefile | 2 +- .../powerpc/signal/sig_sc_double_restart.c | 174 +++++++++++++++++++++ 6 files changed, 201 insertions(+), 16 deletions(-) create mode 100644 tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 5db45790a087..ac3970fff0d5 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -182,13 +182,13 @@ extern int ptrace_put_reg(struct task_struct *task, int regno, #ifdef __powerpc64__ #ifdef CONFIG_PPC_BOOK3S -#define TRAP_FLAGS_MASK 0 -#define TRAP(regs) ((regs)->trap) +#define TRAP_FLAGS_MASK 0x10 +#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK) #define FULL_REGS(regs) true #define SET_FULL_REGS(regs) do { } while (0) #else -#define TRAP_FLAGS_MASK 0x1 -#define TRAP(regs) ((regs)->trap & ~0x1) +#define TRAP_FLAGS_MASK 0x11 +#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK) #define FULL_REGS(regs) (((regs)->trap & 1) == 0) #define SET_FULL_REGS(regs) ((regs)->trap |= 1) #endif @@ -202,8 +202,8 @@ extern int ptrace_put_reg(struct task_struct *task, int regno, * On 4xx we use the next bit to indicate whether the exception * is a critical exception (1 means it is). */ -#define TRAP_FLAGS_MASK 0xF -#define TRAP(regs) ((regs)->trap & ~0xF) +#define TRAP_FLAGS_MASK 0x1F +#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK) #define FULL_REGS(regs) (((regs)->trap & 1) == 0) #define SET_FULL_REGS(regs) ((regs)->trap |= 1) #define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0) @@ -227,6 +227,16 @@ static inline bool trap_is_syscall(struct pt_regs *regs) return TRAP(regs) == 0xc00; } +static inline bool trap_norestart(struct pt_regs *regs) +{ + return regs->trap & 0x10; +} + +static inline void set_trap_norestart(struct pt_regs *regs) +{ + regs->trap |= 0x10; +} + #define arch_has_single_step() (1) #ifndef CONFIG_BOOK3S_601 #define arch_has_block_step() (true) diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index f2be9e960c2e..a46c3fdb6853 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -201,6 +201,9 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, if (!trap_is_syscall(regs)) return; + if (trap_norestart(regs)) + return; + /* error signalled ? 
*/ if (!(regs->ccr & 0x10000000)) return; @@ -258,7 +261,7 @@ static void do_signal(struct task_struct *tsk) if (ksig.sig <= 0) { /* No signal to deliver -- put the saved sigmask back */ restore_saved_sigmask(); - tsk->thread.regs->trap = 0; + set_trap_norestart(tsk->thread.regs); return; /* no signals delivered */ } @@ -285,7 +288,7 @@ static void do_signal(struct task_struct *tsk) ret = handle_rt_signal64(&ksig, oldset, tsk); } - tsk->thread.regs->trap = 0; + set_trap_norestart(tsk->thread.regs); signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP)); } diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 4f96d29a22bf..ae3da7440b2f 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -500,7 +500,7 @@ static long restore_user_regs(struct pt_regs *regs, if (!sig) save_r2 = (unsigned int)regs->gpr[2]; err = restore_general_regs(regs, sr); - regs->trap = 0; + set_trap_norestart(regs); err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); if (!sig) regs->gpr[2] = (unsigned long) save_r2; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index adfde59cf4ba..77061915897f 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -350,8 +350,8 @@ static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig, err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]); err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]); err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]); - /* skip SOFTE */ - regs->trap = 0; + /* Don't allow userspace to set SOFTE */ + set_trap_norestart(regs); err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); @@ -472,10 +472,8 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, &sc->gp_regs[PT_XER]); err |= __get_user(tsk->thread.ckpt_regs.ccr, &sc->gp_regs[PT_CCR]); - - /* Don't allow userspace to set the trap value */ - regs->trap = 0; - + /* Don't allow userspace to set SOFTE */ + set_trap_norestart(regs); /* These regs are not checkpointed; they can go in 'regs'. */ err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile index 932a032bf036..d6ae54663aed 100644 --- a/tools/testing/selftests/powerpc/signal/Makefile +++ b/tools/testing/selftests/powerpc/signal/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -TEST_GEN_PROGS := signal signal_tm sigfuz sigreturn_vdso +TEST_GEN_PROGS := signal signal_tm sigfuz sigreturn_vdso sig_sc_double_restart CFLAGS += -maltivec $(OUTPUT)/signal_tm: CFLAGS += -mhtm diff --git a/tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c b/tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c new file mode 100644 index 000000000000..e3972264615b --- /dev/null +++ b/tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Test that a syscall does not get restarted twice, handled by trap_norestart() + * + * Based on Al's description, and a test for the bug fixed in this commit: + * + * commit 9a81c16b527528ad307843be5571111aa8d35a80 + * Author: Al Viro + * Date: Mon Sep 20 21:48:57 2010 +0100 + * + * powerpc: fix double syscall restarts + * + * Make sigreturn zero regs->trap, make do_signal() do the same on all + * paths. 
As it is, signal interrupting e.g. read() from fd 512 (== + * ERESTARTSYS) with another signal getting unblocked when the first + * handler finishes will lead to restart one insn earlier than it ought + * to. Same for multiple signals with in-kernel handlers interrupting + * that sucker at the same time. Same for multiple signals of any kind + * interrupting that sucker on 64bit... + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "utils.h" + +static void SIGUSR1_handler(int sig) +{ + kill(getpid(), SIGUSR2); + /* + * SIGUSR2 is blocked until the handler exits, at which point it will + * be raised again and think there is a restart to be done because the + * pending restarted syscall has 512 (ERESTARTSYS) in r3. The second + * restart will retreat NIP another 4 bytes to fail case branch. + */ +} + +static void SIGUSR2_handler(int sig) +{ +} + +static ssize_t raw_read(int fd, void *buf, size_t count) +{ + register long nr asm("r0") = __NR_read; + register long _fd asm("r3") = fd; + register void *_buf asm("r4") = buf; + register size_t _count asm("r5") = count; + + asm volatile( +" b 0f \n" +" b 1f \n" +" 0: sc 0 \n" +" bns 2f \n" +" neg %0,%0 \n" +" b 2f \n" +" 1: \n" +" li %0,%4 \n" +" 2: \n" + : "+r"(_fd), "+r"(nr), "+r"(_buf), "+r"(_count) + : "i"(-ENOANO) + : "memory", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "cr0"); + + if (_fd < 0) { + errno = -_fd; + _fd = -1; + } + + return _fd; +} + +#define DATA "test 123" +#define DLEN (strlen(DATA)+1) + +int test_restart(void) +{ + int pipefd[2]; + pid_t pid; + char buf[512]; + + if (pipe(pipefd) == -1) { + perror("pipe"); + exit(EXIT_FAILURE); + } + + pid = fork(); + if (pid == -1) { + perror("fork"); + exit(EXIT_FAILURE); + } + + if (pid == 0) { /* Child reads from pipe */ + struct sigaction act; + int fd; + + memset(&act, 0, sizeof(act)); + sigaddset(&act.sa_mask, SIGUSR2); + act.sa_handler = SIGUSR1_handler; + act.sa_flags = SA_RESTART; + if (sigaction(SIGUSR1, &act, NULL) == -1) { + perror("sigaction"); + exit(EXIT_FAILURE); + } + + memset(&act, 0, sizeof(act)); + act.sa_handler = SIGUSR2_handler; + act.sa_flags = SA_RESTART; + if (sigaction(SIGUSR2, &act, NULL) == -1) { + perror("sigaction"); + exit(EXIT_FAILURE); + } + + /* Let's get ERESTARTSYS into r3 */ + while ((fd = dup(pipefd[0])) != 512) { + if (fd == -1) { + perror("dup"); + exit(EXIT_FAILURE); + } + } + + if (raw_read(fd, buf, 512) == -1) { + if (errno == ENOANO) { + fprintf(stderr, "Double restart moved restart before sc instruction.\n"); + _exit(EXIT_FAILURE); + } + perror("read"); + exit(EXIT_FAILURE); + } + + if (strncmp(buf, DATA, DLEN)) { + fprintf(stderr, "bad test string %s\n", buf); + exit(EXIT_FAILURE); + } + + return 0; + + } else { + int wstatus; + + usleep(100000); /* Hack to get reader waiting */ + kill(pid, SIGUSR1); + usleep(100000); + if (write(pipefd[1], DATA, DLEN) != DLEN) { + perror("write"); + exit(EXIT_FAILURE); + } + close(pipefd[0]); + close(pipefd[1]); + if (wait(&wstatus) == -1) { + perror("wait"); + exit(EXIT_FAILURE); + } + if (!WIFEXITED(wstatus)) { + fprintf(stderr, "child exited abnormally\n"); + exit(EXIT_FAILURE); + } + + FAIL_IF(WEXITSTATUS(wstatus) != EXIT_SUCCESS); + + return 0; + } +} + +int main(void) +{ + test_harness_set_timeout(10); + return test_harness(test_restart, "sig sys restart"); +} -- cgit v1.2.3-59-g8ed1b From 0f6be41c60699fd8cdfa93e5e85a306cec1ac1d0 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Thu, 7 May 2020 13:57:49 -0500 Subject: powerpc: Replace zero-length array with flexible-array The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] sizeof(flexible-array-member) triggers a warning because flexible array members have incomplete type[1]. There are some instances of code in which the sizeof operator is being incorrectly/erroneously applied to zero-length arrays and the result is zero. Such instances may be hiding some bugs. So, this work (flexible-array member conversions) will also help to get completely rid of those sorts of issues. This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200507185749.GA14994@embeddedor --- arch/powerpc/platforms/powermac/nvram.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index dc7a5bae8f1c..853ccc4480e2 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c @@ -55,7 +55,7 @@ struct chrp_header { u8 cksum; u16 len; char name[12]; - u8 data[0]; + u8 data[]; }; struct core99_header { -- cgit v1.2.3-59-g8ed1b From 02bddf21c34d0a918acc8647195ba4507e3db8fc Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 7 May 2020 13:57:55 -0500 Subject: powerpc/mm: Replace zero-length array with flexible-array The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] sizeof(flexible-array-member) triggers a warning because flexible array members have incomplete type[1]. There are some instances of code in which the sizeof operator is being incorrectly/erroneously applied to zero-length arrays and the result is zero. Such instances may be hiding some bugs. 
So, this work (flexible-array member conversions) will also help to get completely rid of those sorts of issues. This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200507185755.GA15014@embeddedor --- arch/powerpc/mm/hugetlbpage.c | 2 +- tools/testing/selftests/powerpc/pmu/ebb/trace.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 33b3461d91e8..d06efb946c7d 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -253,7 +253,7 @@ int __init alloc_bootmem_huge_page(struct hstate *h) struct hugepd_freelist { struct rcu_head rcu; unsigned int index; - void *ptes[0]; + void *ptes[]; }; static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/trace.h b/tools/testing/selftests/powerpc/pmu/ebb/trace.h index 7c0fb5d2bdb1..da2a3be5441f 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/trace.h +++ b/tools/testing/selftests/powerpc/pmu/ebb/trace.h @@ -18,7 +18,7 @@ struct trace_entry { u8 type; u8 length; - u8 data[0]; + u8 data[]; }; struct trace_buffer @@ -26,7 +26,7 @@ struct trace_buffer u64 size; bool overflow; void *tail; - u8 data[0]; + u8 data[]; }; struct trace_buffer *trace_buffer_allocate(u64 size); -- cgit v1.2.3-59-g8ed1b From 7ffa8b7dc11752827329e4e84a574ea6aaf24716 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 28 Apr 2020 22:31:30 +1000 Subject: powerpc/64: Don't initialise init_task->thread.regs Aneesh increased the size of struct pt_regs by 16 bytes and started seeing this WARN_ON: smp: Bringing up secondary CPUs ... 
------------[ cut here ]------------ WARNING: CPU: 0 PID: 0 at arch/powerpc/kernel/process.c:455 giveup_all+0xb4/0x110 Modules linked in: CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.7.0-rc2-gcc-8.2.0-1.g8f6a41f-default+ #318 NIP: c00000000001a2b4 LR: c00000000001a29c CTR: c0000000031d0000 REGS: c0000000026d3980 TRAP: 0700 Not tainted (5.7.0-rc2-gcc-8.2.0-1.g8f6a41f-default+) MSR: 800000000282b033 CR: 48048224 XER: 00000000 CFAR: c000000000019cc8 IRQMASK: 1 GPR00: c00000000001a264 c0000000026d3c20 c0000000026d7200 800000000280b033 GPR04: 0000000000000001 0000000000000000 0000000000000077 30206d7372203164 GPR08: 0000000000002000 0000000002002000 800000000280b033 3230303030303030 GPR12: 0000000000008800 c0000000031d0000 0000000000800050 0000000002000066 GPR16: 000000000309a1a0 000000000309a4b0 000000000309a2d8 000000000309a890 GPR20: 00000000030d0098 c00000000264da40 00000000fd620000 c0000000ff798080 GPR24: c00000000264edf0 c0000001007469f0 00000000fd620000 c0000000020e5e90 GPR28: c00000000264edf0 c00000000264d200 000000001db60000 c00000000264d200 NIP [c00000000001a2b4] giveup_all+0xb4/0x110 LR [c00000000001a29c] giveup_all+0x9c/0x110 Call Trace: [c0000000026d3c20] [c00000000001a264] giveup_all+0x64/0x110 (unreliable) [c0000000026d3c90] [c00000000001ae34] __switch_to+0x104/0x480 [c0000000026d3cf0] [c000000000e0b8a0] __schedule+0x320/0x970 [c0000000026d3dd0] [c000000000e0c518] schedule_idle+0x38/0x70 [c0000000026d3df0] [c00000000019c7c8] do_idle+0x248/0x3f0 [c0000000026d3e70] [c00000000019cbb8] cpu_startup_entry+0x38/0x40 [c0000000026d3ea0] [c000000000011bb0] rest_init+0xe0/0xf8 [c0000000026d3ed0] [c000000002004820] start_kernel+0x990/0x9e0 [c0000000026d3f90] [c00000000000c49c] start_here_common+0x1c/0x400 Which was unexpected. The warning is checking the thread.regs->msr value of the task we are switching from: usermsr = tsk->thread.regs->msr; ... WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); ie. if MSR_VSX is set then both of MSR_FP and MSR_VEC are also set. Dumping tsk->thread.regs->msr we see that it's: 0x1db60000 Which is not a normal looking MSR, in fact the only valid bit is MSR_VSX, all the other bits are reserved in the current definition of the MSR. We can see from the oops that it was swapper/0 that we were switching from when we hit the warning, ie. init_task. So its thread.regs points to the base (high addresses) in init_stack. 
Dumping the content of init_task->thread.regs, with the members of pt_regs annotated (the 16 bytes larger version), we see: 0000000000000000 c000000002780080 gpr[0] gpr[1] 0000000000000000 c000000002666008 gpr[2] gpr[3] c0000000026d3ed0 0000000000000078 gpr[4] gpr[5] c000000000011b68 c000000002780080 gpr[6] gpr[7] 0000000000000000 0000000000000000 gpr[8] gpr[9] c0000000026d3f90 0000800000002200 gpr[10] gpr[11] c000000002004820 c0000000026d7200 gpr[12] gpr[13] 000000001db60000 c0000000010aabe8 gpr[14] gpr[15] c0000000010aabe8 c0000000010aabe8 gpr[16] gpr[17] c00000000294d598 0000000000000000 gpr[18] gpr[19] 0000000000000000 0000000000001ff8 gpr[20] gpr[21] 0000000000000000 c00000000206d608 gpr[22] gpr[23] c00000000278e0cc 0000000000000000 gpr[24] gpr[25] 000000002fff0000 c000000000000000 gpr[26] gpr[27] 0000000002000000 0000000000000028 gpr[28] gpr[29] 000000001db60000 0000000004750000 gpr[30] gpr[31] 0000000002000000 000000001db60000 nip msr 0000000000000000 0000000000000000 orig_r3 ctr c00000000000c49c 0000000000000000 link xer 0000000000000000 0000000000000000 ccr softe 0000000000000000 0000000000000000 trap dar 0000000000000000 0000000000000000 dsisr result 0000000000000000 0000000000000000 ppr kuap 0000000000000000 0000000000000000 pad[2] pad[3] This looks suspiciously like stack frames, not a pt_regs. If we look closely we can see return addresses from the stack trace above, c000000002004820 (start_kernel) and c00000000000c49c (start_here_common). init_task->thread.regs is setup at build time in processor.h: #define INIT_THREAD { \ .ksp = INIT_SP, \ .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ The early boot code where we setup the initial stack is: LOAD_REG_ADDR(r3,init_thread_union) /* set up a stack pointer */ LOAD_REG_IMMEDIATE(r1,THREAD_SIZE) add r1,r3,r1 li r0,0 stdu r0,-STACK_FRAME_OVERHEAD(r1) Which creates a stack frame of size 112 bytes (STACK_FRAME_OVERHEAD). Which is far too small to contain a pt_regs. So the result is init_task->thread.regs is pointing at some stack frames on the init stack, not at a pt_regs. We have gotten away with this for so long because with pt_regs at its current size the MSR happens to point into the first frame, at a location that is not written to by the early asm. With the 16 byte expansion the MSR falls into the second frame, which is used by the compiler, and collides with a saved register that tends to be non-zero. As far as I can see this has been wrong since the original merge of 64-bit ppc support, back in 2002. Conceptually swapper should have no regs, it never entered from userspace, and in fact that's what we do on 32-bit. It's also presumably what the "bogus" comment is referring to. So I think the right fix is to just not-initialise regs at all. I'm slightly worried this will break some code that isn't prepared for a NULL regs, but we'll have to see. Remove the comment in head_64.S which refers to us setting up the regs (even though we never did), and is otherwise not really accurate any more. 
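A minimal userspace model of the layout bug (sizes are illustrative: STACK_FRAME_OVERHEAD really is 112 bytes, but the pt_regs size here is an assumed stand-in):

#include <stdio.h>

#define THREAD_SIZE		(16 * 1024)
#define STACK_FRAME_OVERHEAD	112
#define SIZEOF_PT_REGS		352	/* assumed, for illustration */

int main(void)
{
	unsigned long init_sp = THREAD_SIZE;	/* offset of the stack top */
	/* INIT_THREAD placed regs one pt_regs below the stack top... */
	unsigned long regs = init_sp - SIZEOF_PT_REGS;
	/* ...but early boot only reserved one minimal frame up there: */
	unsigned long reserved = init_sp - STACK_FRAME_OVERHEAD;

	/* Everything below 'reserved' is free for the compiler's frames. */
	printf("pt_regs overlaps live stack frames by %lu bytes\n",
	       reserved - regs);	/* 240 bytes with these numbers */
	return 0;
}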
Reported-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200428123130.73078-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/processor.h | 1 - arch/powerpc/kernel/head_64.S | 9 +-------- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index bfa336fbcfeb..a51964b4ec42 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -300,7 +300,6 @@ struct thread_struct { #else #define INIT_THREAD { \ .ksp = INIT_SP, \ - .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ .addr_limit = KERNEL_DS, \ .fpexc_mode = 0, \ .fscr = FSCR_TAR | FSCR_EBB \ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index ddfbd02140d9..0e05a9a47a4b 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -947,15 +947,8 @@ start_here_multiplatform: std r0,0(r4) #endif - /* The following gets the stack set up with the regs */ - /* pointing to the real addr of the kernel stack. This is */ - /* all done to support the C function call below which sets */ - /* up the htab. This is done because we have relocated the */ - /* kernel but are still running in real mode. */ - - LOAD_REG_ADDR(r3,init_thread_union) - /* set up a stack pointer */ + LOAD_REG_ADDR(r3,init_thread_union) LOAD_REG_IMMEDIATE(r1,THREAD_SIZE) add r1,r3,r1 li r0,0 -- cgit v1.2.3-59-g8ed1b From 24ac99e97fa7b8f0db9b48413a76def9cf73295c Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 28 Apr 2020 22:31:52 +1000 Subject: powerpc: Drop unneeded cast in task_pt_regs() There's no need to cast in task_pt_regs() as tsk->thread.regs should already be a struct pt_regs. If someone's using task_pt_regs() on something that's not a task but happens to have a thread.regs then we'll deal with them later. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200428123152.73566-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/processor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index a51964b4ec42..5ab202055d5a 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -306,7 +306,7 @@ struct thread_struct { } #endif -#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.regs) +#define task_pt_regs(tsk) ((tsk)->thread.regs) unsigned long get_wchan(struct task_struct *p); -- cgit v1.2.3-59-g8ed1b From 7481cad4747303442209bc5dba2f56c3afcea07d Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Sun, 26 Apr 2020 21:44:10 +1000 Subject: selftests/powerpc: Add a test of counting larx/stcx This is based on the count_instructions test. However this one also counts the number of failed stcx's, and in conjunction with knowing the size of the stcx loop, can calculate the total number of instructions executed even in the face of non-deterministic stcx failures. 
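As a worked example of the accounting (numbers invented for illustration): a failed stdcx. branches back and re-executes the 10-instruction ldarx ... bne- window in loop.S, which is where the "* 10" term in do_count_loop() comes from. For a run requesting 1,000,000 instructions, with a measured null-loop overhead of 80 and 3 observed stcx failures, the test expects

	expected = 1,000,000 + 80 + 3 * 10 = 1,000,110

counted instructions, and passes if the measured count matches within the stated 0.0001% tolerance.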
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200426114410.3917383-1-mpe@ellerman.id.au --- tools/testing/selftests/powerpc/pmu/.gitignore | 1 + tools/testing/selftests/powerpc/pmu/Makefile | 8 +- .../selftests/powerpc/pmu/count_stcx_fail.c | 161 +++++++++++++++++++++ tools/testing/selftests/powerpc/pmu/loop.S | 35 +++++ 4 files changed, 203 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/powerpc/pmu/count_stcx_fail.c diff --git a/tools/testing/selftests/powerpc/pmu/.gitignore b/tools/testing/selftests/powerpc/pmu/.gitignore index ff7896903d7b..f69b1e2641a1 100644 --- a/tools/testing/selftests/powerpc/pmu/.gitignore +++ b/tools/testing/selftests/powerpc/pmu/.gitignore @@ -2,3 +2,4 @@ count_instructions l3_bank_test per_event_excludes +count_stcx_fail diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile index 19046db995fe..904672fb78dd 100644 --- a/tools/testing/selftests/powerpc/pmu/Makefile +++ b/tools/testing/selftests/powerpc/pmu/Makefile @@ -2,7 +2,7 @@ noarg: $(MAKE) -C ../ -TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes +TEST_GEN_PROGS := count_instructions count_stcx_fail l3_bank_test per_event_excludes EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c top_srcdir = ../../../../.. @@ -13,8 +13,12 @@ all: $(TEST_GEN_PROGS) ebb $(TEST_GEN_PROGS): $(EXTRA_SOURCES) # loop.S can only be built 64-bit +$(OUTPUT)/count_instructions: CFLAGS += -m64 $(OUTPUT)/count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES) - $(CC) $(CFLAGS) -m64 -o $@ $^ + +$(OUTPUT)/count_stcx_fail: CFLAGS += -m64 +$(OUTPUT)/count_stcx_fail: loop.S $(EXTRA_SOURCES) + $(OUTPUT)/per_event_excludes: ../utils.c diff --git a/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c b/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c new file mode 100644 index 000000000000..7b4ac4537702 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c @@ -0,0 +1,161 @@ +/* + * Copyright 2013, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. 
+ */ + +#define _GNU_SOURCE + +#include +#include +#include +#include + +#include "event.h" +#include "utils.h" +#include "lib.h" + +extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target); + +static void setup_event(struct event *e, u64 config, int type, char *name) +{ + event_init_opts(e, config, type, name); + + e->attr.disabled = 1; + e->attr.exclude_kernel = 1; + e->attr.exclude_hv = 1; + e->attr.exclude_idle = 1; +} + +static int do_count_loop(struct event *events, u64 instructions, + u64 overhead, bool report) +{ + s64 difference, expected; + double percentage; + u64 dummy; + + prctl(PR_TASK_PERF_EVENTS_ENABLE); + + /* Run for 1M instructions */ + thirty_two_instruction_loop_with_ll_sc(instructions >> 5, &dummy); + + prctl(PR_TASK_PERF_EVENTS_DISABLE); + + event_read(&events[0]); + event_read(&events[1]); + event_read(&events[2]); + + expected = instructions + overhead + (events[2].result.value * 10); + difference = events[0].result.value - expected; + percentage = (double)difference / events[0].result.value * 100; + + if (report) { + printf("-----\n"); + event_report(&events[0]); + event_report(&events[1]); + event_report(&events[2]); + + printf("Looped for %llu instructions, overhead %llu\n", instructions, overhead); + printf("Expected %llu\n", expected); + printf("Actual %llu\n", events[0].result.value); + printf("Delta %lld, %f%%\n", difference, percentage); + } + + event_reset(&events[0]); + event_reset(&events[1]); + event_reset(&events[2]); + + if (difference < 0) + difference = -difference; + + /* Tolerate a difference below 0.0001 % */ + difference *= 10000 * 100; + if (difference / events[0].result.value) + return -1; + + return 0; +} + +/* Count how many instructions it takes to do a null loop */ +static u64 determine_overhead(struct event *events) +{ + u64 current, overhead; + int i; + + do_count_loop(events, 0, 0, false); + overhead = events[0].result.value; + + for (i = 0; i < 100; i++) { + do_count_loop(events, 0, 0, false); + current = events[0].result.value; + if (current < overhead) { + printf("Replacing overhead %llu with %llu\n", overhead, current); + overhead = current; + } + } + + return overhead; +} + +#define PM_MRK_STCX_FAIL 0x03e158 +#define PM_STCX_FAIL 0x01e058 + +static int test_body(void) +{ + struct event events[3]; + u64 overhead; + + setup_event(&events[0], PERF_COUNT_HW_INSTRUCTIONS, PERF_TYPE_HARDWARE, "instructions"); + setup_event(&events[1], PERF_COUNT_HW_CPU_CYCLES, PERF_TYPE_HARDWARE, "cycles"); + setup_event(&events[2], PM_STCX_FAIL, PERF_TYPE_RAW, "stcx_fail"); + + if (event_open(&events[0])) { + perror("perf_event_open"); + return -1; + } + + if (event_open_with_group(&events[1], events[0].fd)) { + perror("perf_event_open"); + return -1; + } + + if (event_open_with_group(&events[2], events[0].fd)) { + perror("perf_event_open"); + return -1; + } + + overhead = determine_overhead(events); + printf("Overhead of null loop: %llu instructions\n", overhead); + + /* Run for 1Mi instructions */ + FAIL_IF(do_count_loop(events, 1000000, overhead, true)); + + /* Run for 10Mi instructions */ + FAIL_IF(do_count_loop(events, 10000000, overhead, true)); + + /* Run for 100Mi instructions */ + FAIL_IF(do_count_loop(events, 100000000, overhead, true)); + + /* Run for 1Bi instructions */ + FAIL_IF(do_count_loop(events, 1000000000, overhead, true)); + + /* Run for 16Bi instructions */ + FAIL_IF(do_count_loop(events, 16000000000, overhead, true)); + + /* Run for 64Bi instructions */ + FAIL_IF(do_count_loop(events, 64000000000, overhead, 
true)); + + event_close(&events[0]); + event_close(&events[1]); + + return 0; +} + +static int count_ll_sc(void) +{ + return eat_cpu(test_body); +} + +int main(void) +{ + return test_harness(count_ll_sc, "count_ll_sc"); +} diff --git a/tools/testing/selftests/powerpc/pmu/loop.S b/tools/testing/selftests/powerpc/pmu/loop.S index 8cc9b5e2c9de..c52ba09b6fed 100644 --- a/tools/testing/selftests/powerpc/pmu/loop.S +++ b/tools/testing/selftests/powerpc/pmu/loop.S @@ -41,3 +41,38 @@ FUNC_START(thirty_two_instruction_loop) subi r3,r3,1 b FUNC_NAME(thirty_two_instruction_loop) FUNC_END(thirty_two_instruction_loop) + +FUNC_START(thirty_two_instruction_loop_with_ll_sc) + cmpdi r3,0 + beqlr + addi r5,r5,1 + addi r5,r5,1 + addi r5,r5,1 # 5 + addi r5,r5,1 + addi r5,r5,1 + addi r5,r5,1 + addi r5,r5,1 +1: ldarx r6,0,r4 # 10 + addi r5,r5,1 + addi r5,r5,1 + addi r5,r5,1 + addi r5,r5,1 + addi r5,r5,1 # 15 + addi r5,r5,1 + addi r5,r5,1 + stdcx. r6,0,r4 + bne- 1b + addi r5,r5,1 # 20 + addi r5,r5,1 + addi r5,r5,1 + addi r5,r5,1 + addi r5,r5,1 + addi r5,r5,1 # 25 + addi r5,r5,1 + addi r5,r5,1 + addi r5,r5,1 + addi r5,r5,1 + addi r5,r5,1 # 30 + subi r3,r3,1 + b FUNC_NAME(thirty_two_instruction_loop_with_ll_sc) +FUNC_END(thirty_two_instruction_loop_with_ll_sc) -- cgit v1.2.3-59-g8ed1b From 93900337b9ac2f4eca427eff6d187be2dc3b5551 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 23 Apr 2020 16:00:38 +1000 Subject: drivers/macintosh: Fix memleak in windfarm_pm112 driver create_cpu_loop() calls smu_sat_get_sdb_partition() which does kmalloc() and returns the allocated buffer. In fact it's called twice, and neither buffer is freed. This results in a memory leak as reported by Erhard: unreferenced object 0xc00000047081f840 (size 32): comm "kwindfarm", pid 203, jiffies 4294880630 (age 5552.877s) hex dump (first 32 bytes): c8 06 02 7f ff 02 ff 01 fb bf 00 41 00 20 00 00 ...........A. .. 00 07 89 37 00 a0 00 00 00 00 00 00 00 00 00 00 ...7............ backtrace: [<0000000083f0a65c>] .smu_sat_get_sdb_partition+0xc4/0x2d0 [windfarm_smu_sat] [<000000003010fcb7>] .pm112_wf_notify+0x104c/0x13bc [windfarm_pm112] [<00000000b958b2dd>] .notifier_call_chain+0xa8/0x180 [<0000000070490868>] .blocking_notifier_call_chain+0x64/0x90 [<00000000131d8149>] .wf_thread_func+0x114/0x1a0 [<000000000d54838d>] .kthread+0x13c/0x190 [<00000000669b72bc>] .ret_from_kernel_thread+0x58/0x64 unreferenced object 0xc0000004737089f0 (size 16): comm "kwindfarm", pid 203, jiffies 4294880879 (age 5552.050s) hex dump (first 16 bytes): c4 04 01 7f 22 11 e0 e6 ff 55 7b 12 ec 11 00 00 ...."....U{..... backtrace: [<0000000083f0a65c>] .smu_sat_get_sdb_partition+0xc4/0x2d0 [windfarm_smu_sat] [<00000000b94ef7e1>] .pm112_wf_notify+0x1294/0x13bc [windfarm_pm112] [<00000000b958b2dd>] .notifier_call_chain+0xa8/0x180 [<0000000070490868>] .blocking_notifier_call_chain+0x64/0x90 [<00000000131d8149>] .wf_thread_func+0x114/0x1a0 [<000000000d54838d>] .kthread+0x13c/0x190 [<00000000669b72bc>] .ret_from_kernel_thread+0x58/0x64 Fix it by rearranging the logic so we deal with each buffer separately, which then makes it easy to free the buffer once we're done with it. Fixes: ac171c46667c ("[PATCH] powerpc: Thermal control for dual core G5s") Cc: stable@vger.kernel.org # v2.6.16+ Reported-by: Erhard F. Signed-off-by: Michael Ellerman Tested-by: Erhard F. 
Link: https://lore.kernel.org/r/20200423060038.3308530-1-mpe@ellerman.id.au --- drivers/macintosh/windfarm_pm112.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c index 4150301a89a5..e8377ce0a95a 100644 --- a/drivers/macintosh/windfarm_pm112.c +++ b/drivers/macintosh/windfarm_pm112.c @@ -132,14 +132,6 @@ static int create_cpu_loop(int cpu) s32 tmax; int fmin; - /* Get PID params from the appropriate SAT */ - hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); - if (hdr == NULL) { - printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); - return -EINVAL; - } - piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; - /* Get FVT params to get Tmax; if not found, assume default */ hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL); if (hdr) { @@ -152,6 +144,16 @@ static int create_cpu_loop(int cpu) if (tmax < cpu_all_tmax) cpu_all_tmax = tmax; + kfree(hdr); + + /* Get PID params from the appropriate SAT */ + hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); + if (hdr == NULL) { + printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); + return -EINVAL; + } + piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; + /* * Darwin has a minimum fan speed of 1000 rpm for the 4-way and * 515 for the 2-way. That appears to be overkill, so for now, @@ -174,6 +176,9 @@ static int create_cpu_loop(int cpu) pid.min = fmin; wf_cpu_pid_init(&cpu_pid[cpu], &pid); + + kfree(hdr); + return 0; } -- cgit v1.2.3-59-g8ed1b From d93e5e2d03d4f41dfedb92200a2c0413ab8ee4e7 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 2 Apr 2020 23:49:29 +1100 Subject: powerpc/64: Update Speculation_Store_Bypass in /proc//status Currently we don't report anything useful in /proc//status: $ grep Speculation_Store_Bypass /proc/self/status Speculation_Store_Bypass: unknown Our mitigation is currently always a barrier instruction, which doesn't map that well onto the existing possibilities for the PR_SPEC values. However even if we added a "barrier" type PR_SPEC value, userspace would still need to consult some other source to work out which type of barrier to use. So reporting "vulnerable" seems sufficient, as userspace can see that and then consult its source to determine what barrier to use. Signed-off-by: Gustavo Walbon Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200402124929.3574166-1-mpe@ellerman.id.au --- arch/powerpc/kernel/security.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index bd70f5be1c27..479325baf6a9 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include @@ -353,6 +355,40 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute * return sprintf(buf, "Vulnerable\n"); } +static int ssb_prctl_get(struct task_struct *task) +{ + if (stf_enabled_flush_types == STF_BARRIER_NONE) + /* + * We don't have an explicit signal from firmware that we're + * vulnerable or not, we only have certain CPU revisions that + * are known to be vulnerable. + * + * We assume that if we're on another CPU, where the barrier is + * NONE, then we are not vulnerable. + */ + return PR_SPEC_NOT_AFFECTED; + else + /* + * If we do have a barrier type then we are vulnerable. 
The + * barrier is not a global or per-process mitigation, so the + * only value we can report here is PR_SPEC_ENABLE, which + * appears as "vulnerable" in /proc. + */ + return PR_SPEC_ENABLE; + + return -EINVAL; +} + +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssb_prctl_get(task); + default: + return -ENODEV; + } +} + #ifdef CONFIG_DEBUG_FS static int stf_barrier_set(void *data, u64 val) { -- cgit v1.2.3-59-g8ed1b From 6fa13640aea7bb0760846981aa2da4245307bd26 Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Tue, 28 Apr 2020 13:45:05 +1000 Subject: powerpc/eeh: Fix pseries_eeh_configure_bridge() If a device is hot unplugged during EEH recovery, it's possible for the RTAS call to ibm,configure-pe in pseries_eeh_configure() to return parameter error (-3); however, negative return values are not checked for, and this leads to an infinite loop. Fix this by correctly bailing out on negative values. Signed-off-by: Sam Bobroff Signed-off-by: Michael Ellerman Reviewed-by: Nathan Lynch Link: https://lore.kernel.org/r/1b0a6010a647dc915816e44845b64d72066676a7.1588045502.git.sbobroff@linux.ibm.com --- arch/powerpc/platforms/pseries/eeh_pseries.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 845342814edc..ace117f99d94 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -664,6 +664,8 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) if (!ret) return ret; + if (ret < 0) + break; /* * If RTAS returns a delay value that's above 100ms, cut it @@ -684,7 +686,11 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n", __func__, pe->phb->global_number, pe->addr, ret); - return ret; + /* PAPR defines -3 as "Parameter Error" for this function: */ + if (ret == -3) + return -EINVAL; + else + return -EIO; } /** -- cgit v1.2.3-59-g8ed1b From 466381ecdc741b1767d980e10b1ec49f6bde56f3 Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Tue, 28 Apr 2020 13:45:06 +1000 Subject: powerpc/eeh: Release EEH device state synchronously EEH device state is currently removed (by eeh_remove_device()) during the device release handler, which is invoked as the device's reference count drops to zero. This may take some time, or forever, as other threads may hold references. However, the PCI device state is released synchronously by pci_stop_and_remove_bus_device(). This mismatch causes problems; for example, the device may be re-discovered as a new device before the release handler has been called, leaving the PCI and EEH state mismatched. So instead, call eeh_remove_device() from the bus device removal handlers, which are called synchronously in the removal path.
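To make the window concrete, here is an illustrative pseudo-C sketch of the sequence being fixed (not kernel code; "other_ref" stands in for any other holder of the struct device reference):

  /* Sketch of the race: removal is synchronous, release is not. */
  pci_stop_and_remove_bus_device(dev); /* PCI state torn down now */
  put_device(&dev->dev);               /* not the last ref: other_ref still
                                        * holds one, so the release handler
                                        * (and the old eeh_remove_device()
                                        * call site) has not run yet */
  /* A hot re-add here re-discovers the device with fresh PCI state
   * while the stale EEH state still exists -> mismatch. */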
Signed-off-by: Sam Bobroff Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/0a1f5105d3a33b1c090bba31de63eb0cdd25de7b.1588045502.git.sbobroff@linux.ibm.com --- arch/powerpc/kernel/eeh.c | 31 +++++++++++++++++++++++++++++++ arch/powerpc/kernel/pci-hotplug.c | 2 -- 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 7cdcb413bb44..d407981dec76 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1106,6 +1106,37 @@ static int eeh_init(void) core_initcall_sync(eeh_init); +static int eeh_device_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + + switch (action) { + /* + * Note: It's not possible to perform EEH device addition (i.e. + * {pseries,pnv}_pcibios_bus_add_device()) here because it depends on + * the device's resources, which have not yet been set up. + */ + case BUS_NOTIFY_DEL_DEVICE: + eeh_remove_device(to_pci_dev(dev)); + break; + default: + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block eeh_device_nb = { + .notifier_call = eeh_device_notifier, +}; + +static __init int eeh_set_bus_notifier(void) +{ + bus_register_notifier(&pci_bus_type, &eeh_device_nb); + return 0; +} +arch_initcall(eeh_set_bus_notifier); + /** * eeh_probe_device() - Perform EEH initialization for the indicated pci device * @dev: pci device for which to set up EEH diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c index bf83f76563a3..2fc12198ec07 100644 --- a/arch/powerpc/kernel/pci-hotplug.c +++ b/arch/powerpc/kernel/pci-hotplug.c @@ -57,8 +57,6 @@ void pcibios_release_device(struct pci_dev *dev) struct pci_controller *phb = pci_bus_to_host(dev->bus); struct pci_dn *pdn = pci_get_pdn(dev); - eeh_remove_device(dev); - if (phb->controller_ops.release_device) phb->controller_ops.release_device(dev); -- cgit v1.2.3-59-g8ed1b From 8a5054d8cbbe03c68dcb0957c291c942132e4101 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:33:53 +1000 Subject: powerpc/64s/exception: Fix machine check no-loss idle wakeup The architecture allows for machine check exceptions to cause idle wakeups which resume at the 0x200 address, which has to return via the idle wakeup code, but the early machine check handler is run first. The case of a no state-loss sleep is broken because the early handler uses non-volatile register r1, which is needed for the wakeup protocol, but it is not restored. Fix this by loading r1 from the MCE exception frame before returning to the idle wakeup code. Also update the comment, which has become stale since the idle rewrite in C. This crash was found, and the fix confirmed, with a machine check injection test in the qemu powernv model (which is not upstream in qemu yet).
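For reference when reading the rlwinm/cmpwi pair in the hunk below, a rough C rendering of the wake-state test, assuming the SRR1[46:47] state-loss encoding, is:

  /* Sketch only: SRR1[46:47] (IBM numbering) == bits 17:16 from the LSB,
   * which is what rlwinm r10,r3,47-31,30,31 extracts. */
  unsigned long state_loss = (srr1 >> 16) & 0x3;

  if (state_loss < 2) {
          /* No state loss: return straight to the idle caller, so r1 and
           * LR must already hold the interrupted context's values. */
  } else {
          /* GPRs (or more) lost: idle_return_gpr_loss restores the
           * non-volatiles the idle entry code saved on its own stack. */
  }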
Fixes: 10d91611f426d ("powerpc/64s: Reimplement book3s idle code in C") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200508043408.886394-2-npiggin@gmail.com --- arch/powerpc/kernel/exceptions-64s.S | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 728ccb0f560c..bbf3109c5cba 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1224,17 +1224,19 @@ EXC_COMMON_BEGIN(machine_check_idle_common) bl machine_check_queue_event /* - * We have not used any non-volatile GPRs here, and as a rule - * most exception code including machine check does not. - * Therefore PACA_NAPSTATELOST does not need to be set. Idle - * wakeup will restore volatile registers. + * GPR-loss wakeups are relatively straightforward, because the + * idle sleep code has saved all non-volatile registers on its + * own stack, and r1 in PACAR1. * - * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce. + * For no-loss wakeups the r1 and lr registers used by the + * early machine check handler have to be restored first. r2 is + * the kernel TOC, so no need to restore it. * * Then decrement MCE nesting after finishing with the stack. */ ld r3,_MSR(r1) ld r4,_LINK(r1) + ld r1,GPR1(r1) lhz r11,PACA_IN_MCE(r13) subi r11,r11,1 @@ -1243,7 +1245,7 @@ EXC_COMMON_BEGIN(machine_check_idle_common) mtlr r4 rlwinm r10,r3,47-31,30,31 cmpwi cr1,r10,2 - bltlr cr1 /* no state loss, return to idle caller */ + bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */ b idle_return_gpr_loss #endif -- cgit v1.2.3-59-g8ed1b From ac2a2a1417391180ef12f908a2864692d6d76d40 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:33:54 +1000 Subject: powerpc/64s/exceptions: Fix in_mce accounting in unrecoverable path Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Acked-by: Mahesh Salgaonkar Link: https://lore.kernel.org/r/20200508043408.886394-3-npiggin@gmail.com --- arch/powerpc/kernel/exceptions-64s.S | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index bbf3109c5cba..3322000316ab 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1267,6 +1267,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) andc r10,r10,r3 mtmsrd r10 + lhz r12,PACA_IN_MCE(r13) + subi r12,r12,1 + sth r12,PACA_IN_MCE(r13) + /* Invoke machine_check_exception to print MCE event and panic. */ addi r3,r1,STACK_FRAME_OVERHEAD bl machine_check_exception -- cgit v1.2.3-59-g8ed1b From 16754d25bd7d4e53a52b311d99cc7a8fba875d81 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:33:55 +1000 Subject: powerpc/64s/exceptions: Change irq reconcile for NMIs from reusing _DAR to RESULT A spare interrupt stack slot is needed to save irq state when reconciling NMIs (sreset and decrementer soft-nmi). _DAR is used for this, but we want to reconcile machine checks as well, which do use _DAR. Switch to using RESULT instead, as it's used by system calls. 
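Why RESULT is free can be read off the frame layout; a hedged sketch of the relevant struct pt_regs members (subset only, comments ours):

  struct pt_regs_subset {                 /* illustration only */
          unsigned long dar;    /* _DAR: holds the real fault address for
                                 * machine checks, so not usable as scratch */
          unsigned long dsisr;  /* _DSISR: fault status, same story */
          unsigned long result; /* RESULT: only meaningful for system calls,
                                 * so a safe scratch slot in NMI frames */
  };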
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200508043408.886394-4-npiggin@gmail.com --- arch/powerpc/kernel/exceptions-64s.S | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 3322000316ab..a42b73efb1a9 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -939,13 +939,13 @@ EXC_COMMON_BEGIN(system_reset_common) * the right thing. We do not want to reconcile because that goes * through irq tracing which we don't want in NMI. * - * Save PACAIRQHAPPENED to _DAR (otherwise unused), and set HARD_DIS + * Save PACAIRQHAPPENED to RESULT (otherwise unused), and set HARD_DIS * as we are running with MSR[EE]=0. */ li r10,IRQS_ALL_DISABLED stb r10,PACAIRQSOFTMASK(r13) lbz r10,PACAIRQHAPPENED(r13) - std r10,_DAR(r1) + std r10,RESULT(r1) ori r10,r10,PACA_IRQ_HARD_DIS stb r10,PACAIRQHAPPENED(r13) @@ -966,7 +966,7 @@ EXC_COMMON_BEGIN(system_reset_common) /* * Restore soft mask settings. */ - ld r10,_DAR(r1) + ld r10,RESULT(r1) stb r10,PACAIRQHAPPENED(r13) ld r10,SOFTE(r1) stb r10,PACAIRQSOFTMASK(r13) @@ -2743,7 +2743,7 @@ EXC_COMMON_BEGIN(soft_nmi_common) li r10,IRQS_ALL_DISABLED stb r10,PACAIRQSOFTMASK(r13) lbz r10,PACAIRQHAPPENED(r13) - std r10,_DAR(r1) + std r10,RESULT(r1) ori r10,r10,PACA_IRQ_HARD_DIS stb r10,PACAIRQHAPPENED(r13) @@ -2757,7 +2757,7 @@ EXC_COMMON_BEGIN(soft_nmi_common) /* * Restore soft mask settings. */ - ld r10,_DAR(r1) + ld r10,RESULT(r1) stb r10,PACAIRQHAPPENED(r13) ld r10,SOFTE(r1) stb r10,PACAIRQSOFTMASK(r13) -- cgit v1.2.3-59-g8ed1b From f0fd9dd3c213c947dfb5bc2cad3ef5e30d3258ec Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:33:56 +1000 Subject: powerpc/64s/exceptions: Machine check reconcile irq state pseries fwnmi machine check code pops the soft-irq checks in rtas_call (after the next patch to remove rtas_token from this call path). Rather than play whack a mole with these and forever having fragile code, it seems better to have the early machine check handler perform the same kind of reconcile as the other NMI interrupts. 
WARNING: CPU: 0 PID: 493 at arch/powerpc/kernel/irq.c:343 CPU: 0 PID: 493 Comm: a Tainted: G W NIP: c00000000001ed2c LR: c000000000042c40 CTR: 0000000000000000 REGS: c0000001fffd38b0 TRAP: 0700 Tainted: G W MSR: 8000000000021003 CR: 28000488 XER: 00000000 CFAR: c00000000001ec90 IRQMASK: 0 GPR00: c000000000043820 c0000001fffd3b40 c0000000012ba300 0000000000000000 GPR04: 0000000048000488 0000000000000000 0000000000000000 00000000deadbeef GPR08: 0000000000000080 0000000000000000 0000000000000000 0000000000001001 GPR12: 0000000000000000 c0000000014a0000 0000000000000000 0000000000000000 GPR16: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 GPR20: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 GPR24: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 GPR28: 0000000000000000 0000000000000001 c000000001360810 0000000000000000 NIP [c00000000001ed2c] arch_local_irq_restore.part.0+0xac/0x100 LR [c000000000042c40] unlock_rtas+0x30/0x90 Call Trace: [c0000001fffd3b40] [c000000001360810] 0xc000000001360810 (unreliable) [c0000001fffd3b60] [c000000000043820] rtas_call+0x1c0/0x280 [c0000001fffd3bb0] [c0000000000dc328] fwnmi_release_errinfo+0x38/0x70 [c0000001fffd3c10] [c0000000000dcd8c] pseries_machine_check_realmode+0x1dc/0x540 [c0000001fffd3cd0] [c00000000003fe04] machine_check_early+0x54/0x70 [c0000001fffd3d00] [c000000000008384] machine_check_early_common+0x134/0x1f0 --- interrupt: 200 at 0x13f1307c8 LR = 0x7fff888b8528 Instruction dump: 60000000 7d2000a6 71298000 41820068 39200002 7d210164 4bffff9c 60000000 60000000 7d2000a6 71298000 4c820020 <0fe00000> 4e800020 60000000 60000000 Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200508043408.886394-5-npiggin@gmail.com --- arch/powerpc/kernel/exceptions-64s.S | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index a42b73efb1a9..072772803b7c 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1116,11 +1116,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) li r10,MSR_RI mtmsrd r10,1 + /* + * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see + * system_reset_common) + */ + li r10,IRQS_ALL_DISABLED + stb r10,PACAIRQSOFTMASK(r13) + lbz r10,PACAIRQHAPPENED(r13) + std r10,RESULT(r1) + ori r10,r10,PACA_IRQ_HARD_DIS + stb r10,PACAIRQHAPPENED(r13) + addi r3,r1,STACK_FRAME_OVERHEAD bl machine_check_early std r3,RESULT(r1) /* Save result */ ld r12,_MSR(r1) + /* + * Restore soft mask settings. + */ + ld r10,RESULT(r1) + stb r10,PACAIRQHAPPENED(r13) + ld r10,SOFTE(r1) + stb r10,PACAIRQSOFTMASK(r13) + #ifdef CONFIG_PPC_P7_NAP /* * Check if thread was in power saving mode. We come here when any -- cgit v1.2.3-59-g8ed1b From 7368b38b21bfa39df637701a480262c15ab1a49e Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:33:57 +1000 Subject: powerpc/pseries/ras: Avoid calling rtas_token() in NMI paths In the interest of reducing code and possible failures in the machine check and system reset paths, grab the "ibm,nmi-interlock" token at init time. 
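The pattern is the usual hoisting of a device-tree lookup out of the hot (here: NMI) path; a minimal sketch, with error handling elided:

  static int ibm_nmi_interlock_token;  /* resolved once at boot */

  static void __init fwnmi_tokens_init(void)   /* illustrative helper name */
  {
          ibm_nmi_interlock_token = rtas_token("ibm,nmi-interlock");
  }

  /* Before: rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
   * After:  rtas_call(ibm_nmi_interlock_token, 0, 1, NULL);
   * -- no lookup, and one less thing that can fail, inside the NMI. */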
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Reviewed-by: Christophe Leroy Reviewed-by: Mahesh Salgaonkar Link: https://lore.kernel.org/r/20200508043408.886394-6-npiggin@gmail.com --- arch/powerpc/include/asm/firmware.h | 1 + arch/powerpc/platforms/pseries/ras.c | 2 +- arch/powerpc/platforms/pseries/setup.c | 14 ++++++++++---- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index ca33f4ef6cb4..6003c2e533a0 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h @@ -128,6 +128,7 @@ extern void machine_check_fwnmi(void); /* This is true if we are using the firmware NMI handler (typically LPAR) */ extern int fwnmi_active; +extern int ibm_nmi_interlock_token; extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup; diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 1d1da639b8b7..ac92f8687ea3 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -458,7 +458,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) */ static void fwnmi_release_errinfo(void) { - int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL); + int ret = rtas_call(ibm_nmi_interlock_token, 0, 1, NULL); if (ret != 0) printk(KERN_ERR "FWNMI: nmi-interlock failed: %d\n", ret); } diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 1b55e804927d..64d18f4bf093 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -84,6 +84,7 @@ unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K); EXPORT_SYMBOL(CMO_PageSize); int fwnmi_active; /* TRUE if an FWNMI handler is present */ +int ibm_nmi_interlock_token; static void pSeries_show_cpuinfo(struct seq_file *m) { @@ -114,9 +115,14 @@ static void __init fwnmi_init(void) struct slb_entry *slb_ptr; size_t size; #endif + int ibm_nmi_register_token; - int ibm_nmi_register = rtas_token("ibm,nmi-register"); - if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE) + ibm_nmi_register_token = rtas_token("ibm,nmi-register"); + if (ibm_nmi_register_token == RTAS_UNKNOWN_SERVICE) + return; + + ibm_nmi_interlock_token = rtas_token("ibm,nmi-interlock"); + if (WARN_ON(ibm_nmi_interlock_token == RTAS_UNKNOWN_SERVICE)) return; /* If the kernel's not linked at zero we point the firmware at low @@ -124,8 +130,8 @@ static void __init fwnmi_init(void) system_reset_addr = __pa(system_reset_fwnmi) - PHYSICAL_START; machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START; - if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr, - machine_check_addr)) + if (0 == rtas_call(ibm_nmi_register_token, 2, 1, NULL, + system_reset_addr, machine_check_addr)) fwnmi_active = 1; /* -- cgit v1.2.3-59-g8ed1b From deb70f7a35a22dffa55b2c3aac71bc6fb0f486ce Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:33:58 +1000 Subject: powerpc/pseries/ras: Fix FWNMI_VALID off by one This was discovered while developing qemu fwnmi sreset support. This off-by-one bug means the last 16 bytes of the rtas area cannot be used for a 16-byte save area. It's not a serious bug, and the QEMU implementation has to retain a workaround for old kernels, but it's good to tighten it.
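The boundary case is easiest to check with concrete numbers; this standalone snippet (our construction, mirroring the macro's first range) demonstrates the fix:

  #include <assert.h>

  /* The last 16-byte save area in the 0x7000 page spans 0x7ff0..0x7fff. */
  static int old_ok(unsigned long a) { return a >= 0x7000 && a <  0x7ff0; }
  static int new_ok(unsigned long a) { return a >= 0x7000 && a <= 0x8000 - 16; }

  int main(void)
  {
          assert(!old_ok(0x7ff0)); /* wrongly rejected before the fix */
          assert( new_ok(0x7ff0)); /* accepted after the fix          */
          assert(!new_ok(0x7ff1)); /* would run past the page         */
          return 0;
  }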
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Acked-by: Mahesh Salgaonkar Link: https://lore.kernel.org/r/20200508043408.886394-7-npiggin@gmail.com --- arch/powerpc/platforms/pseries/ras.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index ac92f8687ea3..a5bd0f747bb1 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -395,10 +395,11 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) /* * Some versions of FWNMI place the buffer inside the 4kB page starting at * 0x7000. Other versions place it inside the rtas buffer. We check both. + * Minimum size of the buffer is 16 bytes. */ #define VALID_FWNMI_BUFFER(A) \ - ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \ - (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16)))) + ((((A) >= 0x7000) && ((A) <= 0x8000 - 16)) || \ + (((A) >= rtas.base) && ((A) <= (rtas.base + rtas.size - 16)))) static inline struct rtas_error_log *fwnmi_get_errlog(void) { -- cgit v1.2.3-59-g8ed1b From dff681e95a23f28b3c688a8bd5535f78bd726bc8 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:33:59 +1000 Subject: powerpc/pseries/ras: fwnmi avoid modifying r3 in error case If there is some error with the fwnmi save area, r3 has already been modified which doesn't help with debugging. Only update r3 when restoring the saved value. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200508043408.886394-8-npiggin@gmail.com --- arch/powerpc/platforms/pseries/ras.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index a5bd0f747bb1..fe14186a8cef 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -423,18 +423,19 @@ static inline struct rtas_error_log *fwnmi_get_errlog(void) */ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) { + unsigned long savep_ra; unsigned long *savep; struct rtas_error_log *h; /* Mask top two bits */ - regs->gpr[3] &= ~(0x3UL << 62); + savep_ra = regs->gpr[3] & ~(0x3UL << 62); - if (!VALID_FWNMI_BUFFER(regs->gpr[3])) { + if (!VALID_FWNMI_BUFFER(savep_ra)) { printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]); return NULL; } - savep = __va(regs->gpr[3]); + savep = __va(savep_ra); regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */ h = (struct rtas_error_log *)&savep[1]; -- cgit v1.2.3-59-g8ed1b From d7b14c5c042865070a1411078ab49ea17bad0b41 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:34:00 +1000 Subject: powerpc/pseries/ras: fwnmi sreset should not interlock PAPR does not specify that fwnmi sreset should be interlocked, and PowerVM (and therefore now QEMU) do not require it. These "ibm,nmi-interlock" calls are ignored by firmware, but there is a possibility that the sreset could have interrupted a machine check and released the machine check's interlock too early, corrupting it if another machine check came in. This is an extremely rare case, but it should be fixed for clarity, and to reduce the code executed in the sreset path. Firmware also does not provide error information for the sreset case to look at, so remove that comment.
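The shape of the r3-preserving fix above, reduced to its core (sketch; kernel context and the VALID_FWNMI_BUFFER macro as defined earlier are assumed):

  /* Validate a masked copy; regs->gpr[3] is left untouched until the
   * buffer is known good, so the error printk shows the raw value. */
  unsigned long savep_ra = regs->gpr[3] & ~(0x3UL << 62);

  if (!VALID_FWNMI_BUFFER(savep_ra))
          return NULL;  /* r3 still holds what firmware passed in */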
Signed-off-by: Nicholas Piggin [mpe: Use __be64 to silence some sparse warnings] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200508043408.886394-9-npiggin@gmail.com --- arch/powerpc/platforms/pseries/ras.c | 48 +++++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index fe14186a8cef..d20aecc52ece 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -406,6 +406,20 @@ static inline struct rtas_error_log *fwnmi_get_errlog(void) return (struct rtas_error_log *)local_paca->mce_data_buf; } +static __be64 *fwnmi_get_savep(struct pt_regs *regs) +{ + unsigned long savep_ra; + + /* Mask top two bits */ + savep_ra = regs->gpr[3] & ~(0x3UL << 62); + if (!VALID_FWNMI_BUFFER(savep_ra)) { + printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]); + return NULL; + } + + return __va(savep_ra); +} + /* * Get the error information for errors coming through the * FWNMI vectors. The pt_regs' r3 will be updated to reflect @@ -423,20 +437,14 @@ static inline struct rtas_error_log *fwnmi_get_errlog(void) */ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) { - unsigned long savep_ra; - unsigned long *savep; struct rtas_error_log *h; + __be64 *savep; - /* Mask top two bits */ - savep_ra = regs->gpr[3] & ~(0x3UL << 62); - - if (!VALID_FWNMI_BUFFER(savep_ra)) { - printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]); + savep = fwnmi_get_savep(regs); + if (!savep) return NULL; - } - savep = __va(savep_ra); - regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */ + regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */ h = (struct rtas_error_log *)&savep[1]; /* Use the per cpu buffer from paca to store rtas error log */ @@ -483,11 +491,21 @@ int pSeries_system_reset_exception(struct pt_regs *regs) #endif if (fwnmi_active) { - struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs); - if (errhdr) { - /* XXX Should look at FWNMI information */ - } - fwnmi_release_errinfo(); + __be64 *savep; + + /* + * Firmware (PowerVM and KVM) saves r3 to a save area like + * machine check, which is not exactly what PAPR (2.9) + * suggests but there is no way to detect otherwise, so this + * is the interface now. + * + * System resets do not save any error log or require an + * "ibm,nmi-interlock" rtas call to release. + */ + + savep = fwnmi_get_savep(regs); + if (savep) + regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */ } if (smp_handle_nmi_ipi(regs)) -- cgit v1.2.3-59-g8ed1b From d2cbbd45d433b96e41711a293e59cff259143694 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:34:01 +1000 Subject: powerpc/pseries: Limit machine check stack to 4GB This allows rtas_args to be put on the machine check stack, which avoids a lot of complications with re-entrancy deadlocks. 
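The clamp is a single comparison at stack-allocation time; roughly (sketch with shortened names, SZ_4G being 1UL << 32):

  /* Keep the MCE stack below 4GB on LPAR so an on-stack
   * struct rtas_args is always 32-bit addressable for RTAS. */
  u64 mce_limit = min(bolted_size, rma_size);

  if (is_lpar && mce_limit > (1UL << 32))
          mce_limit = 1UL << 32;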
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Reviewed-by: Christophe Leroy Reviewed-by: Mahesh Salgaonkar Link: https://lore.kernel.org/r/20200508043408.886394-10-npiggin@gmail.com --- arch/powerpc/kernel/setup_64.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 438a9befce41..defe05b6b7a9 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -709,7 +709,7 @@ void __init exc_lvl_early_init(void) */ void __init emergency_stack_init(void) { - u64 limit; + u64 limit, mce_limit; unsigned int i; /* @@ -726,7 +726,16 @@ void __init emergency_stack_init(void) * initialized in kernel/irq.c. These are initialized here in order * to have emergency stacks available as early as possible. */ - limit = min(ppc64_bolted_size(), ppc64_rma_size); + limit = mce_limit = min(ppc64_bolted_size(), ppc64_rma_size); + + /* + * Machine check on pseries calls rtas, but can't use the static + * rtas_args due to a machine check hitting while the lock is held. + * rtas args have to be under 4GB, so the machine check stack is + * limited to 4GB so args can be put on stack. + */ + if (firmware_has_feature(FW_FEATURE_LPAR) && mce_limit > SZ_4G) + mce_limit = SZ_4G; for_each_possible_cpu(i) { paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE; @@ -736,7 +745,7 @@ void __init emergency_stack_init(void) paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE; /* emergency stack for machine check exception handling. */ - paca_ptrs[i]->mc_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE; + paca_ptrs[i]->mc_emergency_sp = alloc_stack(mce_limit, i) + THREAD_SIZE; #endif } } -- cgit v1.2.3-59-g8ed1b From 2576f5f9169620bf329cf1e91086e6041b98e4b2 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:34:02 +1000 Subject: powerpc/pseries: Machine check use rtas_call_unlocked() with args on stack With the previous patch, machine checks can use rtas_call_unlocked() which avoids the RTAS spinlock which would deadlock if a machine check hits while making an RTAS call. This also avoids the complex RTAS error logging which has more RTAS calls and includes kmalloc (which can return memory beyond RMA, which would also crash). Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200508043408.886394-11-npiggin@gmail.com --- arch/powerpc/platforms/pseries/ras.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index d20aecc52ece..f3736fcd98fc 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -468,7 +468,15 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) */ static void fwnmi_release_errinfo(void) { - int ret = rtas_call(ibm_nmi_interlock_token, 0, 1, NULL); + struct rtas_args rtas_args; + int ret; + + /* + * On pseries, the machine check stack is limited to under 4GB, so + * args can be on-stack. 
+ */ + rtas_call_unlocked(&rtas_args, ibm_nmi_interlock_token, 0, 1, NULL); + ret = be32_to_cpu(rtas_args.rets[0]); if (ret != 0) printk(KERN_ERR "FWNMI: nmi-interlock failed: %d\n", ret); } -- cgit v1.2.3-59-g8ed1b From 116ac378bb3ff844df333e7609e7604651a0db9d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:34:03 +1000 Subject: powerpc/64s: machine check interrupt update NMI accounting machine_check_early() is taken as an NMI, so nmi_enter() is used there. machine_check_exception() is no longer taken as an NMI (it's invoked via irq_work in the case a machine check hits in kernel mode), so remove the nmi_enter() from that case. In NMI context, hash faults don't try to refill the hash table, which can lead to crashes accessing non-pinned kernel pages. System reset still has this potential problem. Signed-off-by: Nicholas Piggin [mpe: Drop change in show_regs() which breaks Book3E] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200508043408.886394-12-npiggin@gmail.com --- arch/powerpc/kernel/mce.c | 7 +++++++ arch/powerpc/kernel/traps.c | 14 +++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index 8077b5fb18a7..be7e3f92a7b5 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -574,6 +574,9 @@ EXPORT_SYMBOL_GPL(machine_check_print_event_info); long machine_check_early(struct pt_regs *regs) { long handled = 0; + bool nested = in_nmi(); + if (!nested) + nmi_enter(); hv_nmi_check_nonrecoverable(regs); @@ -582,6 +585,10 @@ long machine_check_early(struct pt_regs *regs) */ if (ppc_md.machine_check_early) handled = ppc_md.machine_check_early(regs); + + if (!nested) + nmi_exit(); + return handled; } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 3fca22276bb1..9f6852322e59 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -823,7 +823,19 @@ int machine_check_generic(struct pt_regs *regs) void machine_check_exception(struct pt_regs *regs) { int recover = 0; - bool nested = in_nmi(); + bool nested; + + /* + * BOOK3S_64 does not call this handler as a non-maskable interrupt + * (it uses its own early real-mode handler to handle the MCE proper + * and then raises irq_work to call this handler when interrupts are + * enabled). Set nested = true for this case, which just makes it avoid + * the nmi_enter/exit. 
+ */ + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) || in_nmi()) + nested = true; + else + nested = false; if (!nested) nmi_enter(); -- cgit v1.2.3-59-g8ed1b From f2d7f62e4abdb03de3f4267361d96c417312d05c Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:34:04 +1000 Subject: powerpc: Implement ftrace_enabled() helpers Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Reviewed-by: Christophe Leroy Link: https://lore.kernel.org/r/20200508043408.886394-13-npiggin@gmail.com --- arch/powerpc/include/asm/ftrace.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index f54a08a2cd70..bc76970b6ee5 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -108,9 +108,23 @@ static inline void this_cpu_enable_ftrace(void) { get_paca()->ftrace_enabled = 1; } + +/* Disable ftrace on this CPU if possible (may not be implemented) */ +static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) +{ + get_paca()->ftrace_enabled = ftrace_enabled; +} + +static inline u8 this_cpu_get_ftrace_enabled(void) +{ + return get_paca()->ftrace_enabled; +} + #else /* CONFIG_PPC64 */ static inline void this_cpu_disable_ftrace(void) { } static inline void this_cpu_enable_ftrace(void) { } +static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { } +static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; } #endif /* CONFIG_PPC64 */ #endif /* !__ASSEMBLY__ */ -- cgit v1.2.3-59-g8ed1b From abd106fb437ad1cd8c8df8ccabd0fa941ef6342a Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:34:05 +1000 Subject: powerpc/64s: machine check do not trace real-mode handler Rather than notrace annotations throughout a significant part of the machine check code across kernel/ pseries/ and powernv/ which can easily be broken and is infrequently tested, use paca->ftrace_enabled to blanket-disable tracing of the real-mode non-maskable handler. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Reviewed-by: Christophe Leroy Acked-by: Naveen N. Rao Link: https://lore.kernel.org/r/20200508043408.886394-14-npiggin@gmail.com --- arch/powerpc/kernel/mce.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index be7e3f92a7b5..fd90c0eda229 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -571,10 +572,14 @@ EXPORT_SYMBOL_GPL(machine_check_print_event_info); * * regs->nip and regs->msr contains srr0 and ssr1. */ -long machine_check_early(struct pt_regs *regs) +long notrace machine_check_early(struct pt_regs *regs) { long handled = 0; bool nested = in_nmi(); + u8 ftrace_enabled = this_cpu_get_ftrace_enabled(); + + this_cpu_set_ftrace_enabled(0); + if (!nested) nmi_enter(); @@ -589,6 +594,8 @@ long machine_check_early(struct pt_regs *regs) if (!nested) nmi_exit(); + this_cpu_set_ftrace_enabled(ftrace_enabled); + return handled; } -- cgit v1.2.3-59-g8ed1b From bbbc8032b00f8ef287894425fbdb691049e28d39 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:34:06 +1000 Subject: powerpc/traps: Do not trace system reset Similarly to the previous patch, do not trace system reset. This code is used when there is a crash or hang, and tracing disturbs the system more and has been known to crash in the crash handling path. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Reviewed-by: Christophe Leroy Acked-by: Naveen N. Rao Link: https://lore.kernel.org/r/20200508043408.886394-15-npiggin@gmail.com --- arch/powerpc/kernel/traps.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 9f6852322e59..ee209c5a1ad7 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -443,6 +443,9 @@ void system_reset_exception(struct pt_regs *regs) unsigned long hsrr0, hsrr1; bool nested = in_nmi(); bool saved_hsrrs = false; + u8 ftrace_enabled = this_cpu_get_ftrace_enabled(); + + this_cpu_set_ftrace_enabled(0); /* * Avoid crashes in case of nested NMI exceptions. Recoverability @@ -524,6 +527,8 @@ out: if (!nested) nmi_exit(); + this_cpu_set_ftrace_enabled(ftrace_enabled); + /* What should we do here? We could issue a shutdown or hard reset. */ } -- cgit v1.2.3-59-g8ed1b From 265d6e588d87194c2fe2d6c240247f0264e0c19b Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:34:07 +1000 Subject: powerpc/traps: Make unrecoverable NMIs die instead of panic System Reset and Machine Check interrupts that are not recoverable due to being nested or interrupting when RI=0 currently panic. This is not necessary, and can often just kill the current context and recover. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Reviewed-by: Christophe Leroy Link: https://lore.kernel.org/r/20200508043408.886394-16-npiggin@gmail.com --- arch/powerpc/kernel/traps.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index ee209c5a1ad7..477befcda8d3 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -513,11 +513,11 @@ out: #ifdef CONFIG_PPC_BOOK3S_64 BUG_ON(get_paca()->in_nmi == 0); if (get_paca()->in_nmi > 1) - nmi_panic(regs, "Unrecoverable nested System Reset"); + die("Unrecoverable nested System Reset", regs, SIGABRT); #endif /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - nmi_panic(regs, "Unrecoverable System Reset"); + die("Unrecoverable System Reset", regs, SIGABRT); if (saved_hsrrs) { mtspr(SPRN_HSRR0, hsrr0); @@ -875,7 +875,7 @@ void machine_check_exception(struct pt_regs *regs) /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - nmi_panic(regs, "Unrecoverable Machine check"); + die("Unrecoverable Machine check", regs, SIGBUS); return; -- cgit v1.2.3-59-g8ed1b From 4c592a34391ea4987d29c1718f931b50416ca015 Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Sat, 9 May 2020 18:58:31 +0000 Subject: powerpc/head_check: Automatic verbosity To aid debugging build problems turn on shell tracing for the head_check script when the build is verbose. Signed-off-by: Geoff Levand Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1ae1aed811ba6760af2e46d331285dd6a4de5b80.1589049250.git.geoff@infradead.org --- arch/powerpc/tools/head_check.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/tools/head_check.sh b/arch/powerpc/tools/head_check.sh index ad9e57209aa4..37061fb9b58e 100644 --- a/arch/powerpc/tools/head_check.sh +++ b/arch/powerpc/tools/head_check.sh @@ -31,8 +31,10 @@ # level entry code (boot, interrupt vectors, etc) until r2 is set up. This # could cause the kernel to die in early boot. 
-# Turn this on if you want more debug output: -# set -x +# Allow for verbose output +if [ "$V" = "1" ]; then + set -x +fi if [ $# -lt 2 ]; then echo "$0 [path to nm] [path to vmlinux]" 1>&2 -- cgit v1.2.3-59-g8ed1b From f61200d3e3386e78d49677dfb3911c9d7c0dfe4b Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Sat, 9 May 2020 18:58:31 +0000 Subject: powerpc/wrapper: Output linker map file To aid debugging wrapper troubles, output a linker map file 'wrapper.map' when the build is verbose. Signed-off-by: Geoff Levand Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/fb477f5e91c6b74a1dec98df3cc0a1c91632d94d.1589049250.git.geoff@infradead.org --- arch/powerpc/boot/wrapper | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index ed6266367bc0..35ace40d9fc2 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -29,6 +29,7 @@ set -e # Allow for verbose output if [ "$V" = 1 ]; then set -x + map="-Map wrapper.map" fi # defaults @@ -500,7 +501,7 @@ if [ "$platform" != "miboot" ]; then text_start="-Ttext $link_address" fi #link everything - ${CROSS}ld -m $format -T $lds $text_start $pie $nodl -o "$ofile" \ + ${CROSS}ld -m $format -T $lds $text_start $pie $nodl -o "$ofile" $map \ $platformo $tmp $object/wrapper.a rm $tmp fi -- cgit v1.2.3-59-g8ed1b From 331aa46aaf51325d8532a4948f5127b2edc441a5 Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Sat, 9 May 2020 18:58:31 +0000 Subject: powerpc/head_check: Avoid broken pipe Remove the '-m4' option to grep to allow grep to process all of nm's output. This avoids the nm warning: nm terminated with signal 13 [Broken pipe] Signed-off-by: Geoff Levand Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/872b6c84a4250ff140e476c62cabe9e56a02b6c2.1589049250.git.geoff@infradead.org --- arch/powerpc/tools/head_check.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/tools/head_check.sh b/arch/powerpc/tools/head_check.sh index 37061fb9b58e..e32d3162e5ed 100644 --- a/arch/powerpc/tools/head_check.sh +++ b/arch/powerpc/tools/head_check.sh @@ -46,7 +46,7 @@ nm="$1" vmlinux="$2" # gcc-4.6-era toolchain make _stext an A (absolute) symbol rather than T -$nm "$vmlinux" | grep -e " [TA] _stext$" -e " t start_first_256B$" -e " a text_start$" -e " t start_text$" -m4 > .tmp_symbols.txt +$nm "$vmlinux" | grep -e " [TA] _stext$" -e " t start_first_256B$" -e " a text_start$" -e " t start_text$" > .tmp_symbols.txt vma=$(cat .tmp_symbols.txt | grep -e " [TA] _stext$" | cut -d' ' -f1) -- cgit v1.2.3-59-g8ed1b From 6a8aa782cece2330322c33452a767f53f8ba38c9 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 9 May 2020 18:58:31 +0000 Subject: drivers/ps3: Remove duplicate error messages Remove duplicate memory allocation failure error messages. 
Signed-off-by: Markus Elfring Signed-off-by: Geoff Levand Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/c763425d8e6f680d3180b3246c9e77727df179d0.1589049250.git.geoff@infradead.org --- drivers/ps3/ps3-lpm.c | 2 -- drivers/ps3/ps3-vuart.c | 1 - 2 files changed, 3 deletions(-) diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c index 064b5884ba13..e54aa2d82f50 100644 --- a/drivers/ps3/ps3-lpm.c +++ b/drivers/ps3/ps3-lpm.c @@ -1111,8 +1111,6 @@ int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache, lpm_priv->tb_cache_internal = kzalloc( lpm_priv->tb_cache_size + 127, GFP_KERNEL); if (!lpm_priv->tb_cache_internal) { - dev_err(sbd_core(), "%s:%u: alloc internal tb_cache " - "failed\n", __func__, __LINE__); result = -ENOMEM; goto fail_malloc; } diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c index 8e80e0933a1b..4ed131eaff51 100644 --- a/drivers/ps3/ps3-vuart.c +++ b/drivers/ps3/ps3-vuart.c @@ -917,7 +917,6 @@ static int ps3_vuart_bus_interrupt_get(void) vuart_bus_priv.bmp = kzalloc(sizeof(struct ports_bmp), GFP_KERNEL); if (!vuart_bus_priv.bmp) { - pr_debug("%s:%d: kzalloc failed.\n", __func__, __LINE__); result = -ENOMEM; goto fail_bmp_malloc; } -- cgit v1.2.3-59-g8ed1b From 7b27b95a894d6a85c076f8d1f00e35316739bf51 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 9 May 2020 18:58:32 +0000 Subject: net/ps3_gelic_net: Remove duplicate error message Remove an extra message for a memory allocation failure in function gelic_descr_prepare_rx(). Signed-off-by: Markus Elfring Signed-off-by: Geoff Levand Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/ba4bea4da97308c804fd3a0fae3773dde27b20ce.1589049250.git.geoff@infradead.org --- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 070dd6fa9401..8522f3898e0d 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -382,8 +382,6 @@ static int gelic_descr_prepare_rx(struct gelic_card *card, descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1); if (!descr->skb) { descr->buf_addr = 0; /* tell DMAC don't touch memory */ - dev_info(ctodev(card), - "%s:allocate skb failed !!\n", __func__); return -ENOMEM; } descr->buf_size = cpu_to_be32(bufsize); -- cgit v1.2.3-59-g8ed1b From 720bc316690bd27dea9d71510b50f0cd698ffc32 Mon Sep 17 00:00:00 2001 From: Emmanuel Nicolet Date: Sat, 9 May 2020 18:58:32 +0000 Subject: ps3disk: use the default segment boundary Since commit dcebd755926b ("block: use bio_for_each_bvec() to compute multi-page bvec count"), the kernel will bug_on on the PS3 because bio_split() is called with sectors == 0: kernel BUG at block/bio.c:1853! 
Oops: Exception in kernel mode, sig: 5 [#1] BE PAGE_SIZE=4K MMU=Hash PREEMPT SMP NR_CPUS=8 NUMA PS3 Modules linked in: firewire_sbp2 rtc_ps3(+) soundcore ps3_gelic(+) \ ps3rom(+) firewire_core ps3vram(+) usb_common crc_itu_t CPU: 0 PID: 97 Comm: blkid Not tainted 5.3.0-rc4 #1 NIP: c00000000027d0d0 LR: c00000000027d0b0 CTR: 0000000000000000 REGS: c00000000135ae90 TRAP: 0700 Not tainted (5.3.0-rc4) MSR: 8000000000028032 CR: 44008240 XER: 20000000 IRQMASK: 0 GPR00: c000000000289368 c00000000135b120 c00000000084a500 c000000004ff8300 GPR04: 0000000000000c00 c000000004c905e0 c000000004c905e0 000000000000ffff GPR08: 0000000000000000 0000000000000001 0000000000000000 000000000000ffff GPR12: 0000000000000000 c0000000008ef000 000000000000003e 0000000000080001 GPR16: 0000000000000100 000000000000ffff 0000000000000000 0000000000000004 GPR20: c00000000062fd7e 0000000000000001 000000000000ffff 0000000000000080 GPR24: c000000000781788 c00000000135b350 0000000000000080 c000000004c905e0 GPR28: c00000000135b348 c000000004ff8300 0000000000000000 c000000004c90000 NIP [c00000000027d0d0] .bio_split+0x28/0xac LR [c00000000027d0b0] .bio_split+0x8/0xac Call Trace: [c00000000135b120] [c00000000027d130] .bio_split+0x88/0xac (unreliable) [c00000000135b1b0] [c000000000289368] .__blk_queue_split+0x11c/0x53c [c00000000135b2d0] [c00000000028f614] .blk_mq_make_request+0x80/0x7d4 [c00000000135b3d0] [c000000000283a8c] .generic_make_request+0x118/0x294 [c00000000135b4b0] [c000000000283d34] .submit_bio+0x12c/0x174 [c00000000135b580] [c000000000205a44] .mpage_bio_submit+0x3c/0x4c [c00000000135b600] [c000000000206184] .mpage_readpages+0xa4/0x184 [c00000000135b750] [c0000000001ff8fc] .blkdev_readpages+0x24/0x38 [c00000000135b7c0] [c0000000001589f0] .read_pages+0x6c/0x1a8 [c00000000135b8b0] [c000000000158c74] .__do_page_cache_readahead+0x118/0x184 [c00000000135b9b0] [c0000000001591a8] .force_page_cache_readahead+0xe4/0xe8 [c00000000135ba50] [c00000000014fc24] .generic_file_read_iter+0x1d8/0x830 [c00000000135bb50] [c0000000001ffadc] .blkdev_read_iter+0x40/0x5c [c00000000135bbc0] [c0000000001b9e00] .new_sync_read+0x144/0x1a0 [c00000000135bcd0] [c0000000001bc454] .vfs_read+0xa0/0x124 [c00000000135bd70] [c0000000001bc7a4] .ksys_read+0x70/0xd8 [c00000000135be20] [c00000000000a524] system_call+0x5c/0x70 Instruction dump: 7fe3fb78 482e30dc 7c0802a6 482e3085 7c9e2378 f821ff71 7ca42b78 7d3e00d0 7c7d1b78 79290fe0 7cc53378 69290001 <0b090000> 81230028 7bca0020 7929ba62 [ end trace 313fec760f30aa1f ]--- The problem originates from setting the segment boundary of the request queue to -1UL. This makes get_max_segment_size() return zero when offset is zero, whatever the max segment size. The test with BLK_SEG_BOUNDARY_MASK fails and 'mask - (mask & offset) + 1' overflows to zero in the return statement. Not setting the segment boundary and using the default value (BLK_SEG_BOUNDARY_MASK) fixes the problem. 
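The wrap-around is reproducible in isolation; with the boundary mask the driver was setting and a zero offset, the quoted expression collapses to zero:

  #include <assert.h>

  int main(void)
  {
          unsigned long mask = -1UL; /* blk_queue_segment_boundary(q, -1UL) */
          unsigned long offset = 0;

          unsigned long max_seg = mask - (mask & offset) + 1;

          assert(max_seg == 0); /* -1UL + 1 wraps: bio_split() sees 0 sectors */
          return 0;
  }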
Signed-off-by: Emmanuel Nicolet Signed-off-by: Geoff Levand Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/060a416c43138f45105c0540eff1a45539f7e2fc.1589049250.git.geoff@infradead.org --- drivers/block/ps3disk.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index c5c6487a19d5..7b55811c2a81 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -454,7 +454,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) queue->queuedata = dev; blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); - blk_queue_segment_boundary(queue, -1UL); blk_queue_dma_alignment(queue, dev->blk_size-1); blk_queue_logical_block_size(queue, dev->blk_size); -- cgit v1.2.3-59-g8ed1b From 126554465d93b10662742128918a5fc338cda4aa Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Sat, 9 May 2020 18:58:32 +0000 Subject: powerpc/ps3: Fix kexec shutdown hang The ps3_mm_region_destroy() and ps3_mm_vas_destroy() routines are called very late in the shutdown via kexec's mmu_cleanup_all routine. By the time mmu_cleanup_all runs, it is too late to use udbg_printf, and calling it will cause PS3 systems to hang. Remove all debugging statements from ps3_mm_region_destroy() and ps3_mm_vas_destroy() and replace any error reporting with calls to lv1_panic. With this change builds with 'DEBUG' defined will not cause kexec reboots to hang, and builds with 'DEBUG' defined or not will end in lv1_panic if an error is encountered. Signed-off-by: Geoff Levand Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/7325c4af2b4c989c19d6a26b90b1fec9c0615ddf.1589049250.git.geoff@infradead.org --- arch/powerpc/platforms/ps3/mm.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index b83f2c851b40..d094321964fb 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c @@ -200,13 +200,14 @@ void ps3_mm_vas_destroy(void) { int result; - DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id); - if (map.vas_id) { result = lv1_select_virtual_address_space(0); - BUG_ON(result); - result = lv1_destruct_virtual_address_space(map.vas_id); - BUG_ON(result); + result += lv1_destruct_virtual_address_space(map.vas_id); + + if (result) { + lv1_panic(0); + } + map.vas_id = 0; } } @@ -304,19 +305,20 @@ static void ps3_mm_region_destroy(struct mem_region *r) int result; if (!r->destroy) { - pr_info("%s:%d: Not destroying high region: %llxh %llxh\n", - __func__, __LINE__, r->base, r->size); return; } - DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base); - if (r->base) { result = lv1_release_memory(r->base); - BUG_ON(result); + + if (result) { + lv1_panic(0); + } + r->size = r->base = r->offset = 0; map.total = map.rm.size; } + ps3_mm_set_repository_highmem(NULL); } -- cgit v1.2.3-59-g8ed1b From 802268fd82676ffce432776f60b93a0b15e58e0c Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:21 +1000 Subject: powerpc/xmon: Remove store_inst() for patch_instruction() For modifying instructions in xmon, patch_instruction() can serve the same role that store_inst() is performing with the advantage of not being specific to xmon. In some places patch_instruction() is already being used, followed by store_inst(). In these cases just remove the store_inst(). Otherwise replace store_inst() with patch_instruction().
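No cache maintenance is lost in the conversion: patch_instruction() ends with essentially the sequence store_inst() performed. Roughly (a sketch of the common path, not the exact kernel source):

  static int patch_sketch(unsigned int *addr, unsigned int instr)
  {
          *addr = instr;                             /* store new instruction */
          asm volatile("dcbst 0,%0; sync; icbi 0,%0; sync; isync"
                       : : "r" (addr) : "memory");   /* what store_inst() did */
          return 0;
  }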
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Reviewed-by: Nicholas Piggin Link: https://lore.kernel.org/r/20200506034050.24806-2-jniethe5@gmail.com --- arch/powerpc/xmon/xmon.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index a7430632bab4..f65cb5bafc0f 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -326,11 +326,6 @@ static inline void sync(void) asm volatile("sync; isync"); } -static inline void store_inst(void *p) -{ - asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p)); -} - static inline void cflush(void *p) { asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p)); @@ -882,8 +877,7 @@ static struct bpt *new_breakpoint(unsigned long a) for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { if (!bp->enabled && atomic_read(&bp->ref_count) == 0) { bp->address = a; - bp->instr[1] = bpinstr; - store_inst(&bp->instr[1]); + patch_instruction(&bp->instr[1], bpinstr); return bp; } } @@ -895,25 +889,26 @@ static struct bpt *new_breakpoint(unsigned long a) static void insert_bpts(void) { int i; + unsigned int instr; struct bpt *bp; bp = bpts; for (i = 0; i < NBPTS; ++i, ++bp) { if ((bp->enabled & (BP_TRAP|BP_CIABR)) == 0) continue; - if (mread(bp->address, &bp->instr[0], 4) != 4) { + if (mread(bp->address, &instr, 4) != 4) { printf("Couldn't read instruction at %lx, " "disabling breakpoint there\n", bp->address); bp->enabled = 0; continue; } - if (IS_MTMSRD(bp->instr[0]) || IS_RFID(bp->instr[0])) { + if (IS_MTMSRD(instr) || IS_RFID(instr)) { printf("Breakpoint at %lx is on an mtmsrd or rfid " "instruction, disabling it\n", bp->address); bp->enabled = 0; continue; } - store_inst(&bp->instr[0]); + patch_instruction(bp->instr, instr); if (bp->enabled & BP_CIABR) continue; if (patch_instruction((unsigned int *)bp->address, @@ -923,7 +918,6 @@ static void insert_bpts(void) bp->enabled &= ~BP_TRAP; continue; } - store_inst((void *)bp->address); } } @@ -958,8 +952,6 @@ static void remove_bpts(void) (unsigned int *)bp->address, bp->instr[0]) != 0) printf("Couldn't remove breakpoint at %lx\n", bp->address); - else - store_inst((void *)bp->address); } } -- cgit v1.2.3-59-g8ed1b From 51c9ba11f17f25ace1ea6bbfd4586c59105432de Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:22 +1000 Subject: powerpc/xmon: Move breakpoint instructions to own array To execute an instruction out of line after a breakpoint, the NIP is set to the address of struct bpt::instr. Here a copy of the instruction that was replaced with a breakpoint is kept, along with a trap so normal flow can be resumed after XOLing. The struct bpt's are located within the data section. This is problematic as the data section may be marked as no execute. Instead of each struct bpt holding the instructions to be XOL'd, make a new array, bpt_table[], with enough space to hold instructions for the number of supported breakpoints. A later patch will move this to the text section. Make struct bpt::instr a pointer to the instructions in bpt_table[] associated with that breakpoint. This association is a simple mapping: bpts[n] -> bpt_table[n * words per breakpoint]. Currently we only need the copied instruction followed by a trap, so 2 words per breakpoint. 
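The association is pure pointer arithmetic; as a sketch of the mapping described above (bpts[] and bpt_table[] as in the diff below):

  #define BPT_WORDS 2  /* copied instruction + trap */

  /* bpts[n] owns slot n of bpt_table[], i.e. bpt_table[n * BPT_WORDS]. */
  static unsigned int *xol_slot(struct bpt *bp)
  {
          return bpt_table + (bp - bpts) * BPT_WORDS;
  }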
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Reviewed-by: Alistair Popple Link: https://lore.kernel.org/r/20200506034050.24806-3-jniethe5@gmail.com --- arch/powerpc/xmon/xmon.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index f65cb5bafc0f..afb28ad660a7 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -98,7 +98,7 @@ static long *xmon_fault_jmp[NR_CPUS]; /* Breakpoint stuff */ struct bpt { unsigned long address; - unsigned int instr[2]; + unsigned int *instr; atomic_t ref_count; int enabled; unsigned long pad; @@ -117,6 +117,10 @@ static unsigned bpinstr = 0x7fe00008; /* trap */ #define BP_NUM(bp) ((bp) - bpts + 1) +#define BPT_SIZE (sizeof(unsigned int) * 2) +#define BPT_WORDS (BPT_SIZE / sizeof(unsigned int)) +static unsigned int bpt_table[NBPTS * BPT_WORDS]; + /* Prototypes */ static int cmds(struct pt_regs *); static int mread(unsigned long, void *, int); @@ -854,15 +858,13 @@ static struct bpt *in_breakpoint_table(unsigned long nip, unsigned long *offp) { unsigned long off; - off = nip - (unsigned long) bpts; - if (off >= sizeof(bpts)) + off = nip - (unsigned long)bpt_table; + if (off >= sizeof(bpt_table)) return NULL; - off %= sizeof(struct bpt); - if (off != offsetof(struct bpt, instr[0]) - && off != offsetof(struct bpt, instr[1])) + *offp = off % BPT_SIZE; + if (*offp != 0 && *offp != 4) return NULL; - *offp = off - offsetof(struct bpt, instr[0]); - return (struct bpt *) (nip - off); + return bpts + (off / BPT_SIZE); } static struct bpt *new_breakpoint(unsigned long a) @@ -877,7 +879,8 @@ static struct bpt *new_breakpoint(unsigned long a) for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { if (!bp->enabled && atomic_read(&bp->ref_count) == 0) { bp->address = a; - patch_instruction(&bp->instr[1], bpinstr); + bp->instr = bpt_table + ((bp - bpts) * BPT_WORDS); + patch_instruction(bp->instr + 1, bpinstr); return bp; } } -- cgit v1.2.3-59-g8ed1b From 4eff2b4f32a309e2171bfe53db3e93b5614f77cb Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:23 +1000 Subject: powerpc/xmon: Move breakpoints to text section The instructions for xmon's breakpoint are stored bpt_table[] which is in the data section. This is problematic as the data section may be marked as no execute. Move bpt_table[] to the text section. 
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-4-jniethe5@gmail.com --- arch/powerpc/kernel/asm-offsets.c | 8 ++++++++ arch/powerpc/xmon/Makefile | 2 +- arch/powerpc/xmon/xmon.c | 6 +----- arch/powerpc/xmon/xmon_bpts.S | 9 +++++++++ arch/powerpc/xmon/xmon_bpts.h | 14 ++++++++++++++ 5 files changed, 33 insertions(+), 6 deletions(-) create mode 100644 arch/powerpc/xmon/xmon_bpts.S create mode 100644 arch/powerpc/xmon/xmon_bpts.h diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index fcf24a365fc0..9b9cde07e396 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -70,6 +70,10 @@ #include #endif +#ifdef CONFIG_XMON +#include "../xmon/xmon_bpts.h" +#endif + #define STACK_PT_REGS_OFFSET(sym, val) \ DEFINE(sym, STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, val)) @@ -795,5 +799,9 @@ int main(void) DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE)); #endif +#ifdef CONFIG_XMON + DEFINE(BPT_SIZE, BPT_SIZE); +#endif + return 0; } diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index 6f9cccea54f3..89c76ca35640 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -18,7 +18,7 @@ endif ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) -obj-y += xmon.o nonstdio.o spr_access.o +obj-y += xmon.o nonstdio.o spr_access.o xmon_bpts.o ifdef CONFIG_XMON_DISASSEMBLY obj-y += ppc-dis.o ppc-opc.o diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index afb28ad660a7..948d025f2939 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -62,6 +62,7 @@ #include "nonstdio.h" #include "dis-asm.h" +#include "xmon_bpts.h" #ifdef CONFIG_SMP static cpumask_t cpus_in_xmon = CPU_MASK_NONE; @@ -109,7 +110,6 @@ struct bpt { #define BP_TRAP 2 #define BP_DABR 4 -#define NBPTS 256 static struct bpt bpts[NBPTS]; static struct bpt dabr; static struct bpt *iabr; @@ -117,10 +117,6 @@ static unsigned bpinstr = 0x7fe00008; /* trap */ #define BP_NUM(bp) ((bp) - bpts + 1) -#define BPT_SIZE (sizeof(unsigned int) * 2) -#define BPT_WORDS (BPT_SIZE / sizeof(unsigned int)) -static unsigned int bpt_table[NBPTS * BPT_WORDS]; - /* Prototypes */ static int cmds(struct pt_regs *); static int mread(unsigned long, void *, int); diff --git a/arch/powerpc/xmon/xmon_bpts.S b/arch/powerpc/xmon/xmon_bpts.S new file mode 100644 index 000000000000..f3ad0ab50854 --- /dev/null +++ b/arch/powerpc/xmon/xmon_bpts.S @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include "xmon_bpts.h" + +.global bpt_table +bpt_table: + .space NBPTS * BPT_SIZE diff --git a/arch/powerpc/xmon/xmon_bpts.h b/arch/powerpc/xmon/xmon_bpts.h new file mode 100644 index 000000000000..b7e94375db86 --- /dev/null +++ b/arch/powerpc/xmon/xmon_bpts.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef XMON_BPTS_H +#define XMON_BPTS_H + +#define NBPTS 256 +#ifndef __ASSEMBLY__ +#define BPT_SIZE (sizeof(unsigned int) * 2) +#define BPT_WORDS (BPT_SIZE / sizeof(unsigned int)) + +extern unsigned int bpt_table[NBPTS * BPT_WORDS]; + +#endif /* __ASSEMBLY__ */ + +#endif /* XMON_BPTS_H */ -- cgit v1.2.3-59-g8ed1b From 5a7fdcab54ef17c395fc47e73c828a1432e51683 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:24 +1000 Subject: powerpc/xmon: Use bitwise calculations in_breakpoint_table() A modulo operation is used for calculating the current offset from a breakpoint within the breakpoint table. 
As instruction lengths are always a power of 2, this can be replaced
with a bitwise 'and'. The current check for word alignment can be
replaced with checking that the lower 2 bits are not set.

Suggested-by: Christophe Leroy
Signed-off-by: Jordan Niethe
Signed-off-by: Michael Ellerman
Reviewed-by: Alistair Popple
Link: https://lore.kernel.org/r/20200506034050.24806-5-jniethe5@gmail.com
---
 arch/powerpc/xmon/xmon.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 948d025f2939..0fa3aaeee105 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -857,8 +857,8 @@ static struct bpt *in_breakpoint_table(unsigned long nip, unsigned long *offp)
 	off = nip - (unsigned long)bpt_table;
 	if (off >= sizeof(bpt_table))
 		return NULL;
-	*offp = off % BPT_SIZE;
-	if (*offp != 0 && *offp != 4)
+	*offp = off & (BPT_SIZE - 1);
+	if (off & 3)
 		return NULL;
 	return bpts + (off / BPT_SIZE);
 }
-- cgit v1.2.3-59-g8ed1b

From 7c95d8893fb55869882c9f68f4c94840dc43f18f Mon Sep 17 00:00:00 2001
From: Jordan Niethe
Date: Wed, 6 May 2020 13:40:25 +1000
Subject: powerpc: Change calling convention for create_branch() et. al.

create_branch(), create_cond_branch() and translate_branch() return the
instruction that they create, or return 0 to signal an error. Separate
these concerns in preparation for an instruction type that is not just
an unsigned int. Write the created instruction through a pointer passed
as the first parameter to the function, and use a non-zero return value
to signify an error.

Signed-off-by: Jordan Niethe
Signed-off-by: Michael Ellerman
Reviewed-by: Alistair Popple
Link: https://lore.kernel.org/r/20200506034050.24806-6-jniethe5@gmail.com
---
 arch/powerpc/include/asm/code-patching.h |  12 +--
 arch/powerpc/kernel/optprobes.c          |  24 +++---
 arch/powerpc/kernel/setup_32.c           |   4 +-
 arch/powerpc/kernel/trace/ftrace.c       |  24 +++---
 arch/powerpc/lib/code-patching.c         | 134 ++++++++++++++++++------------
 arch/powerpc/lib/feature-fixups.c        |   5 +-
 6 files changed, 119 insertions(+), 84 deletions(-)

diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 898b54262881..351dda7215b6 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -22,10 +22,10 @@
 #define BRANCH_ABSOLUTE	0x2
 
 bool is_offset_in_branch_range(long offset);
-unsigned int create_branch(const unsigned int *addr,
-			   unsigned long target, int flags);
-unsigned int create_cond_branch(const unsigned int *addr,
-				unsigned long target, int flags);
+int create_branch(unsigned int *instr, const unsigned int *addr,
+		  unsigned long target, int flags);
+int create_cond_branch(unsigned int *instr, const unsigned int *addr,
+		       unsigned long target, int flags);
 int patch_branch(unsigned int *addr, unsigned long target, int flags);
 int patch_instruction(unsigned int *addr, unsigned int instr);
 int raw_patch_instruction(unsigned int *addr, unsigned int instr);
@@ -60,8 +60,8 @@ int instr_is_relative_branch(unsigned int instr);
 int instr_is_relative_link_branch(unsigned int instr);
 int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
 unsigned long branch_target(const unsigned int *instr);
-unsigned int translate_branch(const unsigned int *dest,
-			      const unsigned int *src);
+int translate_branch(unsigned int *instr, const unsigned int *dest,
+		     const unsigned int *src);
 extern bool is_conditional_branch(unsigned int instr);
 #ifdef CONFIG_PPC_BOOK3E_64
 void __patch_exception(int exc,
unsigned long addr); diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 024f7aad1952..445b3dad82dc 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -251,15 +251,17 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) goto error; } - branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX, - (unsigned long)op_callback_addr, - BRANCH_SET_LINK); + rc = create_branch(&branch_op_callback, + (unsigned int *)buff + TMPL_CALL_HDLR_IDX, + (unsigned long)op_callback_addr, + BRANCH_SET_LINK); - branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX, - (unsigned long)emulate_step_addr, - BRANCH_SET_LINK); + rc |= create_branch(&branch_emulate_step, + (unsigned int *)buff + TMPL_EMULATE_IDX, + (unsigned long)emulate_step_addr, + BRANCH_SET_LINK); - if (!branch_op_callback || !branch_emulate_step) + if (rc) goto error; patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback); @@ -305,6 +307,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op) void arch_optimize_kprobes(struct list_head *oplist) { + unsigned int instr; struct optimized_kprobe *op; struct optimized_kprobe *tmp; @@ -315,9 +318,10 @@ void arch_optimize_kprobes(struct list_head *oplist) */ memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE); - patch_instruction(op->kp.addr, - create_branch((unsigned int *)op->kp.addr, - (unsigned long)op->optinsn.insn, 0)); + create_branch(&instr, + (unsigned int *)op->kp.addr, + (unsigned long)op->optinsn.insn, 0); + patch_instruction(op->kp.addr, instr); list_del_init(&op->list); } } diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 305ca89d856f..3a43e8e847c8 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -75,7 +75,7 @@ EXPORT_SYMBOL(DMA_MODE_WRITE); notrace void __init machine_init(u64 dt_ptr) { unsigned int *addr = (unsigned int *)patch_site_addr(&patch__memset_nocache); - unsigned long insn; + unsigned int insn; /* Configure static keys first, now that we're relocated. */ setup_feature_keys(); @@ -87,7 +87,7 @@ notrace void __init machine_init(u64 dt_ptr) patch_instruction_site(&patch__memcpy_nocache, PPC_INST_NOP); - insn = create_cond_branch(addr, branch_target(addr), 0x820000); + create_cond_branch(&insn, addr, branch_target(addr), 0x820000); patch_instruction(addr, insn); /* replace b by bne cr0 */ /* Do some early initialization based on the flat device tree */ diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 7ea0ca044b65..8799d891320c 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -48,7 +48,7 @@ ftrace_call_replace(unsigned long ip, unsigned long addr, int link) addr = ppc_function_entry((void *)addr); /* if (link) set op to 'bl' else 'b' */ - op = create_branch((unsigned int *)ip, addr, link ? 1 : 0); + create_branch(&op, (unsigned int *)ip, addr, link ? 
1 : 0); return op; } @@ -89,10 +89,11 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) */ static int test_24bit_addr(unsigned long ip, unsigned long addr) { + unsigned int op; addr = ppc_function_entry((void *)addr); /* use the create_branch to verify that this offset can be branched */ - return create_branch((unsigned int *)ip, addr, 0); + return create_branch(&op, (unsigned int *)ip, addr, 0) == 0; } static int is_bl_op(unsigned int op) @@ -287,6 +288,7 @@ __ftrace_make_nop(struct module *mod, static unsigned long find_ftrace_tramp(unsigned long ip) { int i; + unsigned int instr; /* * We have the compiler generated long_branch tramps at the end @@ -295,7 +297,8 @@ static unsigned long find_ftrace_tramp(unsigned long ip) for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--) if (!ftrace_tramps[i]) continue; - else if (create_branch((void *)ip, ftrace_tramps[i], 0)) + else if (create_branch(&instr, (void *)ip, + ftrace_tramps[i], 0) == 0) return ftrace_tramps[i]; return 0; @@ -324,6 +327,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp) { int i, op; unsigned long ptr; + unsigned int instr; static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS]; /* Is this a known long jump tramp? */ @@ -366,7 +370,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp) #else ptr = ppc_global_function_entry((void *)ftrace_caller); #endif - if (!create_branch((void *)tramp, ptr, 0)) { + if (create_branch(&instr, (void *)tramp, ptr, 0)) { pr_debug("%ps is not reachable from existing mcount tramp\n", (void *)ptr); return -1; @@ -511,6 +515,7 @@ static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned int op[2]; + unsigned int instr; void *ip = (void *)rec->ip; unsigned long entry, ptr, tramp; struct module *mod = rec->arch.mod; @@ -557,7 +562,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) } /* Ensure branch is within 24 bits */ - if (!create_branch(ip, tramp, BRANCH_SET_LINK)) { + if (create_branch(&instr, ip, tramp, BRANCH_SET_LINK)) { pr_err("Branch out of range\n"); return -EINVAL; } @@ -574,6 +579,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { + int err; unsigned int op; unsigned long ip = rec->ip; @@ -594,9 +600,9 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) } /* create the branch to the trampoline */ - op = create_branch((unsigned int *)ip, - rec->arch.mod->arch.tramp, BRANCH_SET_LINK); - if (!op) { + err = create_branch(&op, (unsigned int *)ip, + rec->arch.mod->arch.tramp, BRANCH_SET_LINK); + if (err) { pr_err("REL24 out of range!\n"); return -EINVAL; } @@ -776,7 +782,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, } /* Ensure branch is within 24 bits */ - if (!create_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) { + if (create_branch(&op, (unsigned int *)ip, tramp, BRANCH_SET_LINK)) { pr_err("Branch out of range\n"); return -EINVAL; } diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 3345f039a876..6ed3301c0582 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -196,7 +196,10 @@ NOKPROBE_SYMBOL(patch_instruction); int patch_branch(unsigned int *addr, unsigned long target, int flags) { - return patch_instruction(addr, create_branch(addr, target, flags)); + unsigned int instr; + + create_branch(&instr, addr, target, flags); + return patch_instruction(addr, instr); } bool 
is_offset_in_branch_range(long offset) @@ -243,30 +246,30 @@ bool is_conditional_branch(unsigned int instr) } NOKPROBE_SYMBOL(is_conditional_branch); -unsigned int create_branch(const unsigned int *addr, - unsigned long target, int flags) +int create_branch(unsigned int *instr, + const unsigned int *addr, + unsigned long target, int flags) { - unsigned int instruction; long offset; + *instr = 0; offset = target; if (! (flags & BRANCH_ABSOLUTE)) offset = offset - (unsigned long)addr; /* Check we can represent the target in the instruction format */ if (!is_offset_in_branch_range(offset)) - return 0; + return 1; /* Mask out the flags and target, so they don't step on each other. */ - instruction = 0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC); + *instr = 0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC); - return instruction; + return 0; } -unsigned int create_cond_branch(const unsigned int *addr, - unsigned long target, int flags) +int create_cond_branch(unsigned int *instr, const unsigned int *addr, + unsigned long target, int flags) { - unsigned int instruction; long offset; offset = target; @@ -275,12 +278,12 @@ unsigned int create_cond_branch(const unsigned int *addr, /* Check we can represent the target in the instruction format */ if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3) - return 0; + return 1; /* Mask out the flags and target, so they don't step on each other. */ - instruction = 0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC); + *instr = 0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC); - return instruction; + return 0; } static unsigned int branch_opcode(unsigned int instr) @@ -361,18 +364,19 @@ int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr) return 0; } -unsigned int translate_branch(const unsigned int *dest, const unsigned int *src) +int translate_branch(unsigned int *instr, const unsigned int *dest, + const unsigned int *src) { unsigned long target; target = branch_target(src); if (instr_is_branch_iform(*src)) - return create_branch(dest, target, *src); + return create_branch(instr, dest, target, *src); else if (instr_is_branch_bform(*src)) - return create_cond_branch(dest, target, *src); + return create_cond_branch(instr, dest, target, *src); - return 0; + return 1; } #ifdef CONFIG_PPC_BOOK3E_64 @@ -403,6 +407,7 @@ static void __init test_trampoline(void) static void __init test_branch_iform(void) { + int err; unsigned int instr; unsigned long addr; @@ -443,35 +448,35 @@ static void __init test_branch_iform(void) check(instr_is_branch_to_addr(&instr, addr - 0x2000000)); /* Branch to self, with link */ - instr = create_branch(&instr, addr, BRANCH_SET_LINK); + err = create_branch(&instr, &instr, addr, BRANCH_SET_LINK); check(instr_is_branch_to_addr(&instr, addr)); /* Branch to self - 0x100, with link */ - instr = create_branch(&instr, addr - 0x100, BRANCH_SET_LINK); + err = create_branch(&instr, &instr, addr - 0x100, BRANCH_SET_LINK); check(instr_is_branch_to_addr(&instr, addr - 0x100)); /* Branch to self + 0x100, no link */ - instr = create_branch(&instr, addr + 0x100, 0); + err = create_branch(&instr, &instr, addr + 0x100, 0); check(instr_is_branch_to_addr(&instr, addr + 0x100)); /* Maximum relative negative offset, - 32 MB */ - instr = create_branch(&instr, addr - 0x2000000, BRANCH_SET_LINK); + err = create_branch(&instr, &instr, addr - 0x2000000, BRANCH_SET_LINK); check(instr_is_branch_to_addr(&instr, addr - 0x2000000)); /* Out of range relative negative offset, - 32 MB + 4*/ - instr = create_branch(&instr, addr - 
0x2000004, BRANCH_SET_LINK); - check(instr == 0); + err = create_branch(&instr, &instr, addr - 0x2000004, BRANCH_SET_LINK); + check(err); /* Out of range relative positive offset, + 32 MB */ - instr = create_branch(&instr, addr + 0x2000000, BRANCH_SET_LINK); - check(instr == 0); + err = create_branch(&instr, &instr, addr + 0x2000000, BRANCH_SET_LINK); + check(err); /* Unaligned target */ - instr = create_branch(&instr, addr + 3, BRANCH_SET_LINK); - check(instr == 0); + err = create_branch(&instr, &instr, addr + 3, BRANCH_SET_LINK); + check(err); /* Check flags are masked correctly */ - instr = create_branch(&instr, addr, 0xFFFFFFFC); + err = create_branch(&instr, &instr, addr, 0xFFFFFFFC); check(instr_is_branch_to_addr(&instr, addr)); check(instr == 0x48000000); } @@ -480,16 +485,19 @@ static void __init test_create_function_call(void) { unsigned int *iptr; unsigned long dest; + unsigned int instr; /* Check we can create a function call */ iptr = (unsigned int *)ppc_function_entry(test_trampoline); dest = ppc_function_entry(test_create_function_call); - patch_instruction(iptr, create_branch(iptr, dest, BRANCH_SET_LINK)); + create_branch(&instr, iptr, dest, BRANCH_SET_LINK); + patch_instruction(iptr, instr); check(instr_is_branch_to_addr(iptr, dest)); } static void __init test_branch_bform(void) { + int err; unsigned long addr; unsigned int *iptr, instr, flags; @@ -525,35 +533,35 @@ static void __init test_branch_bform(void) flags = 0x3ff000 | BRANCH_SET_LINK; /* Branch to self */ - instr = create_cond_branch(iptr, addr, flags); + err = create_cond_branch(&instr, iptr, addr, flags); check(instr_is_branch_to_addr(&instr, addr)); /* Branch to self - 0x100 */ - instr = create_cond_branch(iptr, addr - 0x100, flags); + err = create_cond_branch(&instr, iptr, addr - 0x100, flags); check(instr_is_branch_to_addr(&instr, addr - 0x100)); /* Branch to self + 0x100 */ - instr = create_cond_branch(iptr, addr + 0x100, flags); + err = create_cond_branch(&instr, iptr, addr + 0x100, flags); check(instr_is_branch_to_addr(&instr, addr + 0x100)); /* Maximum relative negative offset, - 32 KB */ - instr = create_cond_branch(iptr, addr - 0x8000, flags); + err = create_cond_branch(&instr, iptr, addr - 0x8000, flags); check(instr_is_branch_to_addr(&instr, addr - 0x8000)); /* Out of range relative negative offset, - 32 KB + 4*/ - instr = create_cond_branch(iptr, addr - 0x8004, flags); - check(instr == 0); + err = create_cond_branch(&instr, iptr, addr - 0x8004, flags); + check(err); /* Out of range relative positive offset, + 32 KB */ - instr = create_cond_branch(iptr, addr + 0x8000, flags); - check(instr == 0); + err = create_cond_branch(&instr, iptr, addr + 0x8000, flags); + check(err); /* Unaligned target */ - instr = create_cond_branch(iptr, addr + 3, flags); - check(instr == 0); + err = create_cond_branch(&instr, iptr, addr + 3, flags); + check(err); /* Check flags are masked correctly */ - instr = create_cond_branch(iptr, addr, 0xFFFFFFFC); + err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC); check(instr_is_branch_to_addr(&instr, addr)); check(instr == 0x43FF0000); } @@ -562,6 +570,7 @@ static void __init test_translate_branch(void) { unsigned long addr; unsigned int *p, *q; + unsigned int instr; void *buf; buf = vmalloc(PAGE_ALIGN(0x2000000 + 1)); @@ -575,7 +584,8 @@ static void __init test_translate_branch(void) patch_branch(p, addr, 0); check(instr_is_branch_to_addr(p, addr)); q = p + 1; - patch_instruction(q, translate_branch(q, p)); + translate_branch(&instr, q, p); + patch_instruction(q, instr); 
check(instr_is_branch_to_addr(q, addr)); /* Maximum negative case, move b . to addr + 32 MB */ @@ -583,7 +593,8 @@ static void __init test_translate_branch(void) addr = (unsigned long)p; patch_branch(p, addr, 0); q = buf + 0x2000000; - patch_instruction(q, translate_branch(q, p)); + translate_branch(&instr, q, p); + patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); check(*q == 0x4a000000); @@ -593,7 +604,8 @@ static void __init test_translate_branch(void) addr = (unsigned long)p; patch_branch(p, addr, 0); q = buf + 4; - patch_instruction(q, translate_branch(q, p)); + translate_branch(&instr, q, p); + patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); check(*q == 0x49fffffc); @@ -603,7 +615,8 @@ static void __init test_translate_branch(void) addr = 0x1000000 + (unsigned long)buf; patch_branch(p, addr, BRANCH_SET_LINK); q = buf + 0x1400000; - patch_instruction(q, translate_branch(q, p)); + translate_branch(&instr, q, p); + patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); @@ -612,7 +625,8 @@ static void __init test_translate_branch(void) addr = 0x2000000 + (unsigned long)buf; patch_branch(p, addr, 0); q = buf + 4; - patch_instruction(q, translate_branch(q, p)); + translate_branch(&instr, q, p); + patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); @@ -622,18 +636,22 @@ static void __init test_translate_branch(void) /* Simple case, branch to self moved a little */ p = buf; addr = (unsigned long)p; - patch_instruction(p, create_cond_branch(p, addr, 0)); + create_cond_branch(&instr, p, addr, 0); + patch_instruction(p, instr); check(instr_is_branch_to_addr(p, addr)); q = p + 1; - patch_instruction(q, translate_branch(q, p)); + translate_branch(&instr, q, p); + patch_instruction(q, instr); check(instr_is_branch_to_addr(q, addr)); /* Maximum negative case, move b . 
to addr + 32 KB */ p = buf; addr = (unsigned long)p; - patch_instruction(p, create_cond_branch(p, addr, 0xFFFFFFFC)); + create_cond_branch(&instr, p, addr, 0xFFFFFFFC); + patch_instruction(p, instr); q = buf + 0x8000; - patch_instruction(q, translate_branch(q, p)); + translate_branch(&instr, q, p); + patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); check(*q == 0x43ff8000); @@ -641,9 +659,11 @@ static void __init test_translate_branch(void) /* Maximum positive case, move x to x - 32 KB + 4 */ p = buf + 0x8000; addr = (unsigned long)p; - patch_instruction(p, create_cond_branch(p, addr, 0xFFFFFFFC)); + create_cond_branch(&instr, p, addr, 0xFFFFFFFC); + patch_instruction(p, instr); q = buf + 4; - patch_instruction(q, translate_branch(q, p)); + translate_branch(&instr, q, p); + patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); check(*q == 0x43ff7ffc); @@ -651,18 +671,22 @@ static void __init test_translate_branch(void) /* Jump to x + 12 KB moved to x + 20 KB */ p = buf; addr = 0x3000 + (unsigned long)buf; - patch_instruction(p, create_cond_branch(p, addr, BRANCH_SET_LINK)); + create_cond_branch(&instr, p, addr, BRANCH_SET_LINK); + patch_instruction(p, instr); q = buf + 0x5000; - patch_instruction(q, translate_branch(q, p)); + translate_branch(&instr, q, p); + patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); /* Jump to x + 8 KB moved to x - 8 KB + 4 */ p = buf + 0x2000; addr = 0x4000 + (unsigned long)buf; - patch_instruction(p, create_cond_branch(p, addr, 0)); + create_cond_branch(&instr, p, addr, 0); + patch_instruction(p, instr); q = buf + 4; - patch_instruction(q, translate_branch(q, p)); + translate_branch(&instr, q, p); + patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 4ba634b89ce5..b129d7b4e7dd 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -44,6 +44,7 @@ static unsigned int *calc_addr(struct fixup_entry *fcur, long offset) static int patch_alt_instruction(unsigned int *src, unsigned int *dest, unsigned int *alt_start, unsigned int *alt_end) { + int err; unsigned int instr; instr = *src; @@ -53,8 +54,8 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest, /* Branch within the section doesn't need translating */ if (target < alt_start || target > alt_end) { - instr = translate_branch(dest, src); - if (!instr) + err = translate_branch(&instr, dest, src); + if (err) return 1; } } -- cgit v1.2.3-59-g8ed1b From 753462512868674a788ecc77bb96752efb818785 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:26 +1000 Subject: powerpc: Use a macro for creating instructions from u32s In preparation for instructions having a more complex data type start using a macro, ppc_inst(), for making an instruction out of a u32. A macro is used so that instructions can be used as initializer elements. Currently this does nothing, but it will allow for creating a data type that can represent prefixed instructions. 
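The initializer-element point can be illustrated with a small sketch
(not code from this patch; PPC_INST_NOP is the usual 0x60000000 nop
encoding seen elsewhere in this series): because the macro form expands
to a constant expression, it stays legal where C requires one, which a
static inline helper would not:

#define PPC_INST_NOP	0x60000000	/* ori r0,r0,0 */
#define ppc_inst(x)	(x)

/* Fine: ppc_inst(PPC_INST_NOP) is still a C constant expression. */
static const unsigned int nops[2] = {
	ppc_inst(PPC_INST_NOP),
	ppc_inst(PPC_INST_NOP),
};

/*
 * By contrast, "static inline u32 ppc_inst(u32 x) { return x; }" could
 * not be used in the initializer above, because a function call is not
 * a constant expression in C.
 */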
Signed-off-by: Jordan Niethe [mpe: Change include guard to _ASM_POWERPC_INST_H] Signed-off-by: Michael Ellerman Reviewed-by: Alistair Popple Link: https://lore.kernel.org/r/20200506034050.24806-7-jniethe5@gmail.com --- arch/powerpc/include/asm/code-patching.h | 3 +- arch/powerpc/include/asm/inst.h | 11 ++++++ arch/powerpc/kernel/align.c | 1 + arch/powerpc/kernel/crash_dump.c | 3 +- arch/powerpc/kernel/epapr_paravirt.c | 3 +- arch/powerpc/kernel/hw_breakpoint.c | 3 +- arch/powerpc/kernel/jump_label.c | 3 +- arch/powerpc/kernel/kgdb.c | 5 +-- arch/powerpc/kernel/kprobes.c | 5 +-- arch/powerpc/kernel/module_64.c | 3 +- arch/powerpc/kernel/optprobes.c | 32 +++++++++-------- arch/powerpc/kernel/security.c | 12 ++++--- arch/powerpc/kernel/setup_32.c | 2 +- arch/powerpc/kernel/trace/ftrace.c | 25 +++++++------- arch/powerpc/kernel/uprobes.c | 1 + arch/powerpc/kvm/emulate_loadstore.c | 2 +- arch/powerpc/lib/code-patching.c | 57 ++++++++++++++++--------------- arch/powerpc/lib/feature-fixups.c | 39 ++++++++++----------- arch/powerpc/lib/test_emulate_step.c | 39 ++++++++++----------- arch/powerpc/mm/nohash/8xx.c | 5 +-- arch/powerpc/perf/8xx-pmu.c | 9 ++--- arch/powerpc/platforms/86xx/mpc86xx_smp.c | 3 +- arch/powerpc/platforms/powermac/smp.c | 3 +- arch/powerpc/xmon/xmon.c | 7 ++-- 24 files changed, 156 insertions(+), 120 deletions(-) create mode 100644 arch/powerpc/include/asm/inst.h diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 351dda7215b6..48e021957ee5 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -11,6 +11,7 @@ #include #include #include +#include /* Flags for create_branch: * "b" == create_branch(addr, target, 0); @@ -48,7 +49,7 @@ static inline int patch_branch_site(s32 *site, unsigned long target, int flags) static inline int modify_instruction(unsigned int *addr, unsigned int clr, unsigned int set) { - return patch_instruction(addr, (*addr & ~clr) | set); + return patch_instruction(addr, ppc_inst((*addr & ~clr) | set)); } static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set) diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h new file mode 100644 index 000000000000..b2e93946ce68 --- /dev/null +++ b/arch/powerpc/include/asm/inst.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_POWERPC_INST_H +#define _ASM_POWERPC_INST_H + +/* + * Instruction data type for POWER + */ + +#define ppc_inst(x) (x) + +#endif /* _ASM_POWERPC_INST_H */ diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 92045ed64976..86e9bf62f18c 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -24,6 +24,7 @@ #include #include #include +#include struct aligninfo { unsigned char len; diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 05745ddbd229..78e556b131db 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -18,6 +18,7 @@ #include #include #include +#include #ifdef DEBUG #include @@ -44,7 +45,7 @@ static void __init create_trampoline(unsigned long addr) * branch to "addr" we jump to ("addr" + 32 MB). Although it requires * two instructions it doesn't require any registers. 
*/ - patch_instruction(p, PPC_INST_NOP); + patch_instruction(p, ppc_inst(PPC_INST_NOP)); patch_branch(++p, addr + PHYSICAL_START, 0); } diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index 9d32158ce36f..e8eb72a65572 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -11,6 +11,7 @@ #include #include #include +#include #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) extern void epapr_ev_idle(void); @@ -36,7 +37,7 @@ static int __init early_init_dt_scan_epapr(unsigned long node, return -1; for (i = 0; i < (len / 4); i++) { - u32 inst = be32_to_cpu(insts[i]); + u32 inst = ppc_inst(be32_to_cpu(insts[i])); patch_instruction(epapr_hypercall_start + i, inst); #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) patch_instruction(epapr_ev_idle_start + i, inst); diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 72f461bd70fb..46e09ac8b84a 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -24,6 +24,7 @@ #include #include #include +#include #include /* @@ -243,7 +244,7 @@ dar_range_overlaps(unsigned long dar, int size, struct arch_hw_breakpoint *info) static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp, struct arch_hw_breakpoint *info) { - unsigned int instr = 0; + unsigned int instr = ppc_inst(0); int ret, type, size; struct instruction_op op; unsigned long addr = info->address; diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c index ca37702bde97..daa4afce7ec8 100644 --- a/arch/powerpc/kernel/jump_label.c +++ b/arch/powerpc/kernel/jump_label.c @@ -6,6 +6,7 @@ #include #include #include +#include void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) @@ -15,5 +16,5 @@ void arch_jump_label_transform(struct jump_entry *entry, if (type == JUMP_LABEL_JMP) patch_branch(addr, entry->target, 0); else - patch_instruction(addr, PPC_INST_NOP); + patch_instruction(addr, ppc_inst(PPC_INST_NOP)); } diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 7dd55eb1259d..a6b38a19133f 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -26,6 +26,7 @@ #include #include #include +#include /* * This table contains the mapping between PowerPC hardware trap types, and @@ -424,7 +425,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) if (err) return err; - err = patch_instruction(addr, BREAK_INSTR); + err = patch_instruction(addr, ppc_inst(BREAK_INSTR)); if (err) return -EFAULT; @@ -439,7 +440,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) unsigned int instr = *(unsigned int *)bpt->saved_instr; unsigned int *addr = (unsigned int *)bpt->bpt_addr; - err = patch_instruction(addr, instr); + err = patch_instruction(addr, ppc_inst(instr)); if (err) return -EFAULT; diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 81efb605113e..2378a7ed4438 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -23,6 +23,7 @@ #include #include #include +#include #include DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; @@ -138,13 +139,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe); void arch_arm_kprobe(struct kprobe *p) { - patch_instruction(p->addr, BREAKPOINT_INSTRUCTION); + patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION)); } NOKPROBE_SYMBOL(arch_arm_kprobe); void arch_disarm_kprobe(struct kprobe *p) { - patch_instruction(p->addr, p->opcode); + 
patch_instruction(p->addr, ppc_inst(p->opcode)); } NOKPROBE_SYMBOL(arch_disarm_kprobe); diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index f808159f3dfd..f390451ad915 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -20,6 +20,7 @@ #include #include #include +#include /* FIXME: We don't do .init separately. To do this, we'd need to have a separate r2 value in the init and core section, and stub between @@ -491,7 +492,7 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me) * "link" branches and they don't return, so they don't need the r2 * restore afterwards. */ - if (!instr_is_relative_link_branch(*prev_insn)) + if (!instr_is_relative_link_branch(ppc_inst(*prev_insn))) return 1; if (*instruction != PPC_INST_NOP) { diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 445b3dad82dc..44006c4ca4f1 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -16,6 +16,7 @@ #include #include #include +#include #define TMPL_CALL_HDLR_IDX \ (optprobe_template_call_handler - optprobe_template_entry) @@ -147,13 +148,13 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op) void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr) { /* addis r4,0,(insn)@h */ - patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) | - ((val >> 16) & 0xffff)); + patch_instruction(addr, ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) | + ((val >> 16) & 0xffff))); addr++; /* ori r4,r4,(insn)@l */ - patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) | - ___PPC_RS(4) | (val & 0xffff)); + patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(4) | + ___PPC_RS(4) | (val & 0xffff))); } /* @@ -163,28 +164,28 @@ void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr) void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr) { /* lis r3,(op)@highest */ - patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) | - ((val >> 48) & 0xffff)); + patch_instruction(addr, ppc_inst(PPC_INST_ADDIS | ___PPC_RT(3) | + ((val >> 48) & 0xffff))); addr++; /* ori r3,r3,(op)@higher */ - patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) | - ___PPC_RS(3) | ((val >> 32) & 0xffff)); + patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | + ___PPC_RS(3) | ((val >> 32) & 0xffff))); addr++; /* rldicr r3,r3,32,31 */ - patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) | - ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31)); + patch_instruction(addr, ppc_inst(PPC_INST_RLDICR | ___PPC_RA(3) | + ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31))); addr++; /* oris r3,r3,(op)@h */ - patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) | - ___PPC_RS(3) | ((val >> 16) & 0xffff)); + patch_instruction(addr, ppc_inst(PPC_INST_ORIS | ___PPC_RA(3) | + ___PPC_RS(3) | ((val >> 16) & 0xffff))); addr++; /* ori r3,r3,(op)@l */ - patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) | - ___PPC_RS(3) | (val & 0xffff)); + patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | + ___PPC_RS(3) | (val & 0xffff))); } int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) @@ -230,7 +231,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int); pr_devel("Copying template to %p, size %lu\n", buff, size); for (i = 0; i < size; i++) { - rc = patch_instruction(buff + i, *(optprobe_template_entry + i)); + rc = patch_instruction(buff + i, + 
ppc_inst(*(optprobe_template_entry + i))); if (rc < 0) goto error; } diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 479325baf6a9..d86701ce116b 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -16,6 +16,7 @@ #include #include #include +#include u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; @@ -439,9 +440,11 @@ static void toggle_count_cache_flush(bool enable) enable = false; if (!enable) { - patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP); + patch_instruction_site(&patch__call_flush_count_cache, + ppc_inst(PPC_INST_NOP)); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP); + patch_instruction_site(&patch__call_kvm_flush_link_stack, + ppc_inst(PPC_INST_NOP)); #endif pr_info("link-stack-flush: software flush disabled.\n"); link_stack_flush_enabled = false; @@ -464,7 +467,8 @@ static void toggle_count_cache_flush(bool enable) // If we just need to flush the link stack, patch an early return if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { - patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR); + patch_instruction_site(&patch__flush_link_stack_return, + ppc_inst(PPC_INST_BLR)); no_count_cache_flush(); return; } @@ -475,7 +479,7 @@ static void toggle_count_cache_flush(bool enable) return; } - patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR); + patch_instruction_site(&patch__flush_count_cache_return, ppc_inst(PPC_INST_BLR)); count_cache_flush_type = COUNT_CACHE_FLUSH_HW; pr_info("count-cache-flush: hardware assisted flush sequence enabled\n"); } diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 3a43e8e847c8..0536e4aed330 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -85,7 +85,7 @@ notrace void __init machine_init(u64 dt_ptr) /* Enable early debugging if any specified (see udbg.h) */ udbg_early_init(); - patch_instruction_site(&patch__memcpy_nocache, PPC_INST_NOP); + patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_INST_NOP)); create_cond_branch(&insn, addr, branch_target(addr), 0x820000); patch_instruction(addr, insn); /* replace b by bne cr0 */ diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 8799d891320c..00f69b7baa8a 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -27,6 +27,7 @@ #include #include #include +#include #ifdef CONFIG_DYNAMIC_FTRACE @@ -161,7 +162,7 @@ __ftrace_make_nop(struct module *mod, #ifdef CONFIG_MPROFILE_KERNEL /* When using -mkernel_profile there is no load to jump over */ - pop = PPC_INST_NOP; + pop = ppc_inst(PPC_INST_NOP); if (probe_kernel_read(&op, (void *)(ip - 4), 4)) { pr_err("Fetching instruction at %lx failed.\n", ip - 4); @@ -169,7 +170,7 @@ __ftrace_make_nop(struct module *mod, } /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */ - if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) { + if (op != ppc_inst(PPC_INST_MFLR) && op != ppc_inst(PPC_INST_STD_LR)) { pr_err("Unexpected instruction %08x around bl _mcount\n", op); return -EINVAL; } @@ -188,7 +189,7 @@ __ftrace_make_nop(struct module *mod, * Use a b +8 to jump over the load. */ - pop = PPC_INST_BRANCH | 8; /* b +8 */ + pop = ppc_inst(PPC_INST_BRANCH | 8); /* b +8 */ /* * Check what is in the next instruction. 
We can see ld r2,40(r1), but @@ -199,7 +200,7 @@ __ftrace_make_nop(struct module *mod, return -EFAULT; } - if (op != PPC_INST_LD_TOC) { + if (op != ppc_inst(PPC_INST_LD_TOC)) { pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op); return -EINVAL; } @@ -275,7 +276,7 @@ __ftrace_make_nop(struct module *mod, return -EINVAL; } - op = PPC_INST_NOP; + op = ppc_inst(PPC_INST_NOP); if (patch_instruction((unsigned int *)ip, op)) return -EPERM; @@ -420,7 +421,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) } } - if (patch_instruction((unsigned int *)ip, PPC_INST_NOP)) { + if (patch_instruction((unsigned int *)ip, ppc_inst(PPC_INST_NOP))) { pr_err("Patching NOP failed.\n"); return -EPERM; } @@ -442,7 +443,7 @@ int ftrace_make_nop(struct module *mod, if (test_24bit_addr(ip, addr)) { /* within range */ old = ftrace_call_replace(ip, addr, 1); - new = PPC_INST_NOP; + new = ppc_inst(PPC_INST_NOP); return ftrace_modify_code(ip, old, new); } else if (core_kernel_text(ip)) return __ftrace_make_nop_kernel(rec, addr); @@ -496,7 +497,7 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) * The load offset is different depending on the ABI. For simplicity * just mask it out when doing the compare. */ - if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000)) + if (op0 != ppc_inst(0x48000008) || ((op1 & 0xffff0000) != 0xe8410000)) return 0; return 1; } @@ -505,7 +506,7 @@ static int expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) { /* look for patched "NOP" on ppc64 with -mprofile-kernel */ - if (op0 != PPC_INST_NOP) + if (op0 != ppc_inst(PPC_INST_NOP)) return 0; return 1; } @@ -588,7 +589,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return -EFAULT; /* It should be pointing to a nop */ - if (op != PPC_INST_NOP) { + if (op != ppc_inst(PPC_INST_NOP)) { pr_err("Expected NOP but have %x\n", op); return -EINVAL; } @@ -645,7 +646,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) return -EFAULT; } - if (op != PPC_INST_NOP) { + if (op != ppc_inst(PPC_INST_NOP)) { pr_err("Unexpected call sequence at %p: %x\n", ip, op); return -EINVAL; } @@ -676,7 +677,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) */ if (test_24bit_addr(ip, addr)) { /* within range */ - old = PPC_INST_NOP; + old = ppc_inst(PPC_INST_NOP); new = ftrace_call_replace(ip, addr, 1); return ftrace_modify_code(ip, old, new); } else if (core_kernel_text(ip)) diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index 1cfef0e5fec5..31c870287f2b 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -14,6 +14,7 @@ #include #include +#include #define UPROBE_TRAP_NR UINT_MAX diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index 1139bc56e004..135d0e686622 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c @@ -95,7 +95,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) emulated = EMULATE_FAIL; vcpu->arch.regs.msr = vcpu->arch.shared->msr; - if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { + if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) { int type = op.type & INSTR_TYPE_MASK; int size = GETSIZE(op.type); diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 6ed3301c0582..6c30ddadd971 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -17,6 +17,7 @@ #include #include #include 
+#include static int __patch_instruction(unsigned int *exec_addr, unsigned int instr, unsigned int *patch_addr) @@ -414,37 +415,37 @@ static void __init test_branch_iform(void) addr = (unsigned long)&instr; /* The simplest case, branch to self, no flags */ - check(instr_is_branch_iform(0x48000000)); + check(instr_is_branch_iform(ppc_inst(0x48000000))); /* All bits of target set, and flags */ - check(instr_is_branch_iform(0x4bffffff)); + check(instr_is_branch_iform(ppc_inst(0x4bffffff))); /* High bit of opcode set, which is wrong */ - check(!instr_is_branch_iform(0xcbffffff)); + check(!instr_is_branch_iform(ppc_inst(0xcbffffff))); /* Middle bits of opcode set, which is wrong */ - check(!instr_is_branch_iform(0x7bffffff)); + check(!instr_is_branch_iform(ppc_inst(0x7bffffff))); /* Simplest case, branch to self with link */ - check(instr_is_branch_iform(0x48000001)); + check(instr_is_branch_iform(ppc_inst(0x48000001))); /* All bits of targets set */ - check(instr_is_branch_iform(0x4bfffffd)); + check(instr_is_branch_iform(ppc_inst(0x4bfffffd))); /* Some bits of targets set */ - check(instr_is_branch_iform(0x4bff00fd)); + check(instr_is_branch_iform(ppc_inst(0x4bff00fd))); /* Must be a valid branch to start with */ - check(!instr_is_branch_iform(0x7bfffffd)); + check(!instr_is_branch_iform(ppc_inst(0x7bfffffd))); /* Absolute branch to 0x100 */ - instr = 0x48000103; + instr = ppc_inst(0x48000103); check(instr_is_branch_to_addr(&instr, 0x100)); /* Absolute branch to 0x420fc */ - instr = 0x480420ff; + instr = ppc_inst(0x480420ff); check(instr_is_branch_to_addr(&instr, 0x420fc)); /* Maximum positive relative branch, + 20MB - 4B */ - instr = 0x49fffffc; + instr = ppc_inst(0x49fffffc); check(instr_is_branch_to_addr(&instr, addr + 0x1FFFFFC)); /* Smallest negative relative branch, - 4B */ - instr = 0x4bfffffc; + instr = ppc_inst(0x4bfffffc); check(instr_is_branch_to_addr(&instr, addr - 4)); /* Largest negative relative branch, - 32 MB */ - instr = 0x4a000000; + instr = ppc_inst(0x4a000000); check(instr_is_branch_to_addr(&instr, addr - 0x2000000)); /* Branch to self, with link */ @@ -478,7 +479,7 @@ static void __init test_branch_iform(void) /* Check flags are masked correctly */ err = create_branch(&instr, &instr, addr, 0xFFFFFFFC); check(instr_is_branch_to_addr(&instr, addr)); - check(instr == 0x48000000); + check(instr == ppc_inst(0x48000000)); } static void __init test_create_function_call(void) @@ -505,28 +506,28 @@ static void __init test_branch_bform(void) addr = (unsigned long)iptr; /* The simplest case, branch to self, no flags */ - check(instr_is_branch_bform(0x40000000)); + check(instr_is_branch_bform(ppc_inst(0x40000000))); /* All bits of target set, and flags */ - check(instr_is_branch_bform(0x43ffffff)); + check(instr_is_branch_bform(ppc_inst(0x43ffffff))); /* High bit of opcode set, which is wrong */ - check(!instr_is_branch_bform(0xc3ffffff)); + check(!instr_is_branch_bform(ppc_inst(0xc3ffffff))); /* Middle bits of opcode set, which is wrong */ - check(!instr_is_branch_bform(0x7bffffff)); + check(!instr_is_branch_bform(ppc_inst(0x7bffffff))); /* Absolute conditional branch to 0x100 */ - instr = 0x43ff0103; + instr = ppc_inst(0x43ff0103); check(instr_is_branch_to_addr(&instr, 0x100)); /* Absolute conditional branch to 0x20fc */ - instr = 0x43ff20ff; + instr = ppc_inst(0x43ff20ff); check(instr_is_branch_to_addr(&instr, 0x20fc)); /* Maximum positive relative conditional branch, + 32 KB - 4B */ - instr = 0x43ff7ffc; + instr = ppc_inst(0x43ff7ffc); check(instr_is_branch_to_addr(&instr, addr 
+ 0x7FFC)); /* Smallest negative relative conditional branch, - 4B */ - instr = 0x43fffffc; + instr = ppc_inst(0x43fffffc); check(instr_is_branch_to_addr(&instr, addr - 4)); /* Largest negative relative conditional branch, - 32 KB */ - instr = 0x43ff8000; + instr = ppc_inst(0x43ff8000); check(instr_is_branch_to_addr(&instr, addr - 0x8000)); /* All condition code bits set & link */ @@ -563,7 +564,7 @@ static void __init test_branch_bform(void) /* Check flags are masked correctly */ err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC); check(instr_is_branch_to_addr(&instr, addr)); - check(instr == 0x43FF0000); + check(instr == ppc_inst(0x43FF0000)); } static void __init test_translate_branch(void) @@ -597,7 +598,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(*q == 0x4a000000); + check(*q == ppc_inst(0x4a000000)); /* Maximum positive case, move x to x - 32 MB + 4 */ p = buf + 0x2000000; @@ -608,7 +609,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(*q == 0x49fffffc); + check(*q == ppc_inst(0x49fffffc)); /* Jump to x + 16 MB moved to x + 20 MB */ p = buf; @@ -654,7 +655,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(*q == 0x43ff8000); + check(*q == ppc_inst(0x43ff8000)); /* Maximum positive case, move x to x - 32 KB + 4 */ p = buf + 0x8000; @@ -666,7 +667,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(*q == 0x43ff7ffc); + check(*q == ppc_inst(0x43ff7ffc)); /* Jump to x + 12 KB moved to x + 20 KB */ p = buf; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index b129d7b4e7dd..6e7479b8887a 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -21,6 +21,7 @@ #include #include #include +#include struct fixup_entry { unsigned long mask; @@ -89,7 +90,7 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur) } for (; dest < end; dest++) - raw_patch_instruction(dest, PPC_INST_NOP); + raw_patch_instruction(dest, ppc_inst(PPC_INST_NOP)); return 0; } @@ -146,15 +147,15 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types) pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, instrs[0]); + patch_instruction(dest, ppc_inst(instrs[0])); if (types & STF_BARRIER_FALLBACK) patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK); else - patch_instruction(dest + 1, instrs[1]); + patch_instruction(dest + 1, ppc_inst(instrs[1])); - patch_instruction(dest + 2, instrs[2]); + patch_instruction(dest + 2, ppc_inst(instrs[2])); } printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i, @@ -207,12 +208,12 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types) pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, instrs[0]); - patch_instruction(dest + 1, instrs[1]); - patch_instruction(dest + 2, instrs[2]); - patch_instruction(dest + 3, instrs[3]); - patch_instruction(dest + 4, instrs[4]); - patch_instruction(dest + 5, instrs[5]); + patch_instruction(dest, ppc_inst(instrs[0])); + patch_instruction(dest + 1, 
ppc_inst(instrs[1])); + patch_instruction(dest + 2, ppc_inst(instrs[2])); + patch_instruction(dest + 3, ppc_inst(instrs[3])); + patch_instruction(dest + 4, ppc_inst(instrs[4])); + patch_instruction(dest + 5, ppc_inst(instrs[5])); } printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i, (types == STF_BARRIER_NONE) ? "no" : @@ -260,9 +261,9 @@ void do_rfi_flush_fixups(enum l1d_flush_type types) pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, instrs[0]); - patch_instruction(dest + 1, instrs[1]); - patch_instruction(dest + 2, instrs[2]); + patch_instruction(dest, ppc_inst(instrs[0])); + patch_instruction(dest + 1, ppc_inst(instrs[1])); + patch_instruction(dest + 2, ppc_inst(instrs[2])); } printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i, @@ -295,7 +296,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ dest = (void *)start + *start; pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, instr); + patch_instruction(dest, ppc_inst(instr)); } printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); @@ -338,8 +339,8 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ dest = (void *)start + *start; pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, instr[0]); - patch_instruction(dest + 1, instr[1]); + patch_instruction(dest, ppc_inst(instr[0])); + patch_instruction(dest + 1, ppc_inst(instr[1])); } printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); @@ -353,7 +354,7 @@ static void patch_btb_flush_section(long *curr) end = (void *)curr + *(curr + 1); for (; start < end; start++) { pr_devel("patching dest %lx\n", (unsigned long)start); - patch_instruction(start, PPC_INST_NOP); + patch_instruction(start, ppc_inst(PPC_INST_NOP)); } } @@ -382,7 +383,7 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) for (; start < end; start++) { dest = (void *)start + *start; - raw_patch_instruction(dest, PPC_INST_LWSYNC); + raw_patch_instruction(dest, ppc_inst(PPC_INST_LWSYNC)); } } @@ -400,7 +401,7 @@ static void do_final_fixups(void) length = (__end_interrupts - _stext) / sizeof(int); while (length--) { - raw_patch_instruction(dest, *src); + raw_patch_instruction(dest, ppc_inst(*src)); src++; dest++; } diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 53df4146dd32..85d62f16d07a 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -11,6 +11,7 @@ #include #include #include +#include #define IMM_L(i) ((uintptr_t)(i) & 0xffff) #define IMM_DS(i) ((uintptr_t)(i) & 0xfffc) @@ -19,40 +20,40 @@ * Defined with TEST_ prefix so it does not conflict with other * definitions. 
*/ -#define TEST_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | \ +#define TEST_LD(r, base, i) ppc_inst(PPC_INST_LD | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_DS(i)) -#define TEST_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | \ +#define TEST_LWZ(r, base, i) ppc_inst(PPC_INST_LWZ | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_L(i)) -#define TEST_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | \ +#define TEST_LWZX(t, a, b) ppc_inst(PPC_INST_LWZX | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | \ +#define TEST_STD(r, base, i) ppc_inst(PPC_INST_STD | ___PPC_RS(r) | \ ___PPC_RA(base) | IMM_DS(i)) -#define TEST_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | \ +#define TEST_LDARX(t, a, b, eh) ppc_inst(PPC_INST_LDARX | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b) | \ __PPC_EH(eh)) -#define TEST_STDCX(s, a, b) (PPC_INST_STDCX | ___PPC_RS(s) | \ +#define TEST_STDCX(s, a, b) ppc_inst(PPC_INST_STDCX | ___PPC_RS(s) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_LFSX(t, a, b) (PPC_INST_LFSX | ___PPC_RT(t) | \ +#define TEST_LFSX(t, a, b) ppc_inst(PPC_INST_LFSX | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_STFSX(s, a, b) (PPC_INST_STFSX | ___PPC_RS(s) | \ +#define TEST_STFSX(s, a, b) ppc_inst(PPC_INST_STFSX | ___PPC_RS(s) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_LFDX(t, a, b) (PPC_INST_LFDX | ___PPC_RT(t) | \ +#define TEST_LFDX(t, a, b) ppc_inst(PPC_INST_LFDX | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_STFDX(s, a, b) (PPC_INST_STFDX | ___PPC_RS(s) | \ +#define TEST_STFDX(s, a, b) ppc_inst(PPC_INST_STFDX | ___PPC_RS(s) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_LVX(t, a, b) (PPC_INST_LVX | ___PPC_RT(t) | \ +#define TEST_LVX(t, a, b) ppc_inst(PPC_INST_LVX | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_STVX(s, a, b) (PPC_INST_STVX | ___PPC_RS(s) | \ +#define TEST_STVX(s, a, b) ppc_inst(PPC_INST_STVX | ___PPC_RS(s) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b)) -#define TEST_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b)) -#define TEST_ADD(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | \ +#define TEST_LXVD2X(s, a, b) ppc_inst(PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b)) +#define TEST_STXVD2X(s, a, b) ppc_inst(PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b)) +#define TEST_ADD(t, a, b) ppc_inst(PPC_INST_ADD | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | \ +#define TEST_ADD_DOT(t, a, b) ppc_inst(PPC_INST_ADD | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b) | 0x1) -#define TEST_ADDC(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | \ +#define TEST_ADDC(t, a, b) ppc_inst(PPC_INST_ADDC | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_ADDC_DOT(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | \ +#define TEST_ADDC_DOT(t, a, b) ppc_inst(PPC_INST_ADDC | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b) | 0x1) #define MAX_SUBTESTS 16 @@ -472,7 +473,7 @@ static struct compute_test compute_tests[] = { .subtests = { { .descr = "R0 = LONG_MAX", - .instr = PPC_INST_NOP, + .instr = ppc_inst(PPC_INST_NOP), .regs = { .gpr[0] = LONG_MAX, } diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 3189308dece4..b27017109a36 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -101,7 +102,7 @@ static void mmu_patch_addis(s32 *site, long simm) instr &= 0xffff0000; instr |= ((unsigned long)simm) >> 
16; - patch_instruction_site(site, instr); + patch_instruction_site(site, ppc_inst(instr)); } static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot) @@ -125,7 +126,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) mapped = 0; mmu_mapin_immr(); if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR)) - patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP); + patch_instruction_site(&patch__dtlbmiss_immr_jmp, ppc_inst(PPC_INST_NOP)); if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0); } else { diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c index 1ad03c55c88c..acc27fc63eb7 100644 --- a/arch/powerpc/perf/8xx-pmu.c +++ b/arch/powerpc/perf/8xx-pmu.c @@ -15,6 +15,7 @@ #include #include #include +#include #define PERF_8xx_ID_CPU_CYCLES 1 #define PERF_8xx_ID_HW_INSTRUCTIONS 2 @@ -170,8 +171,8 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags) case PERF_8xx_ID_ITLB_LOAD_MISS: if (atomic_dec_return(&itlb_miss_ref) == 0) { /* mfspr r10, SPRN_SPRG_SCRATCH0 */ - unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) | - __PPC_SPR(SPRN_SPRG_SCRATCH0); + struct ppc_inst insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) | + __PPC_SPR(SPRN_SPRG_SCRATCH0)); patch_instruction_site(&patch__itlbmiss_exit_1, insn); #ifndef CONFIG_PIN_TLB_TEXT @@ -182,8 +183,8 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags) case PERF_8xx_ID_DTLB_LOAD_MISS: if (atomic_dec_return(&dtlb_miss_ref) == 0) { /* mfspr r10, SPRN_DAR */ - unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) | - __PPC_SPR(SPRN_DAR); + struct ppc_inst insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) | + __PPC_SPR(SPRN_DAR)); patch_instruction_site(&patch__dtlbmiss_exit_1, insn); patch_instruction_site(&patch__dtlbmiss_exit_2, insn); diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c index 5b91ea5694e3..31540ebf1e29 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -82,7 +83,7 @@ smp_86xx_kick_cpu(int nr) mdelay(1); /* Restore the exception vector */ - patch_instruction(vector, save_vector); + patch_instruction(vector, ppc_inst(save_vector)); local_irq_restore(flags); diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index be2ab5b11e57..44a00990af9d 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -49,6 +49,7 @@ #include #include #include +#include #include "pmac.h" @@ -826,7 +827,7 @@ static int smp_core99_kick_cpu(int nr) mdelay(1); /* Restore our exception vector */ - patch_instruction(vector, save_vector); + patch_instruction(vector, ppc_inst(save_vector)); local_irq_restore(flags); if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 0fa3aaeee105..a56dcb004396 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -54,6 +54,7 @@ #include #include #include +#include #ifdef CONFIG_PPC64 #include @@ -946,7 +947,7 @@ static void remove_bpts(void) if ((bp->enabled & (BP_TRAP|BP_CIABR)) != BP_TRAP) continue; if (mread(bp->address, &instr, 4) == 4 - && instr == bpinstr + && instr == ppc_inst(bpinstr) && patch_instruction( (unsigned int *)bp->address, bp->instr[0]) != 0) printf("Couldn't remove breakpoint at %lx\n", @@ -2847,7 +2848,7 @@ generic_inst_dump(unsigned 
long adr, long count, int praddr, { int nr, dotted; unsigned long first_adr; - unsigned int inst, last_inst = 0; + unsigned int inst, last_inst = ppc_inst(0); unsigned char val[4]; dotted = 0; @@ -2860,7 +2861,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, } break; } - inst = GETWORD(val); + inst = ppc_inst(GETWORD(val)); if (adr > first_adr && inst == last_inst) { if (!dotted) { printf(" ...\n"); -- cgit v1.2.3-59-g8ed1b From 777e26f0edf8dab58b8dd474d35d83bde0ac6d76 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:27 +1000 Subject: powerpc: Use an accessor for instructions In preparation for introducing a more complicated instruction type to accommodate prefixed instructions use an accessor for getting an instruction as a u32. Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-8-jniethe5@gmail.com --- arch/powerpc/include/asm/inst.h | 5 + arch/powerpc/include/asm/sstep.h | 6 +- arch/powerpc/kernel/align.c | 6 +- arch/powerpc/kernel/kprobes.c | 2 +- arch/powerpc/kernel/trace/ftrace.c | 30 ++-- arch/powerpc/kernel/vecemu.c | 16 ++- arch/powerpc/lib/code-patching.c | 18 +-- arch/powerpc/lib/sstep.c | 268 ++++++++++++++++++----------------- arch/powerpc/lib/test_emulate_step.c | 8 +- arch/powerpc/mm/fault.c | 6 +- arch/powerpc/xmon/xmon.c | 4 +- 11 files changed, 190 insertions(+), 179 deletions(-) diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index b2e93946ce68..e8e436fbbbf6 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -8,4 +8,9 @@ #define ppc_inst(x) (x) +static inline u32 ppc_inst_val(u32 x) +{ + return x; +} + #endif /* _ASM_POWERPC_INST_H */ diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h index 769f055509c9..26d729562fe2 100644 --- a/arch/powerpc/include/asm/sstep.h +++ b/arch/powerpc/include/asm/sstep.h @@ -15,9 +15,9 @@ struct pt_regs; * Note that IS_MTMSRD returns true for both an mtmsr (32-bit) * and an mtmsrd (64-bit). */ -#define IS_MTMSRD(instr) (((instr) & 0xfc0007be) == 0x7c000124) -#define IS_RFID(instr) (((instr) & 0xfc0007fe) == 0x4c000024) -#define IS_RFI(instr) (((instr) & 0xfc0007fe) == 0x4c000064) +#define IS_MTMSRD(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124) +#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000024) +#define IS_RFI(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000064) enum instruction_type { COMPUTE, /* arith/logical/CR op, etc. */ diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 86e9bf62f18c..44921001f84a 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -314,8 +314,8 @@ int fix_alignment(struct pt_regs *regs) } #ifdef CONFIG_SPE - if ((instr >> 26) == 0x4) { - int reg = (instr >> 21) & 0x1f; + if ((ppc_inst_val(instr) >> 26) == 0x4) { + int reg = (ppc_inst_val(instr) >> 21) & 0x1f; PPC_WARN_ALIGNMENT(spe, regs); return emulate_spe(regs, reg, instr); } @@ -332,7 +332,7 @@ int fix_alignment(struct pt_regs *regs) * when pasting to a co-processor. Furthermore, paste_last is the * synchronisation point for preceding copy/paste sequences. 
*/ - if ((instr & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe)) + if ((ppc_inst_val(instr) & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe)) return -EIO; r = analyse_instr(&op, regs, instr); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 2378a7ed4438..92fa3070d905 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -234,7 +234,7 @@ static int try_to_emulate(struct kprobe *p, struct pt_regs *regs) * So, we should never get here... but, its still * good to catch them, just in case... */ - printk("Can't step on instruction %x\n", insn); + printk("Can't step on instruction %x\n", ppc_inst_val(insn)); BUG(); } else { /* diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 00f69b7baa8a..cc23c63f3769 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -74,7 +74,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) /* Make sure it is what we expect it to be */ if (replaced != old) { pr_err("%p: replaced (%#x) != old (%#x)", - (void *)ip, replaced, old); + (void *)ip, ppc_inst_val(replaced), ppc_inst_val(old)); return -EINVAL; } @@ -99,19 +99,19 @@ static int test_24bit_addr(unsigned long ip, unsigned long addr) static int is_bl_op(unsigned int op) { - return (op & 0xfc000003) == 0x48000001; + return (ppc_inst_val(op) & 0xfc000003) == 0x48000001; } static int is_b_op(unsigned int op) { - return (op & 0xfc000003) == 0x48000000; + return (ppc_inst_val(op) & 0xfc000003) == 0x48000000; } static unsigned long find_bl_target(unsigned long ip, unsigned int op) { int offset; - offset = (op & 0x03fffffc); + offset = (ppc_inst_val(op) & 0x03fffffc); /* make it signed */ if (offset & 0x02000000) offset |= 0xfe000000; @@ -137,7 +137,7 @@ __ftrace_make_nop(struct module *mod, /* Make sure that that this is still a 24bit jump */ if (!is_bl_op(op)) { - pr_err("Not expected bl: opcode is %x\n", op); + pr_err("Not expected bl: opcode is %x\n", ppc_inst_val(op)); return -EINVAL; } @@ -171,7 +171,8 @@ __ftrace_make_nop(struct module *mod, /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */ if (op != ppc_inst(PPC_INST_MFLR) && op != ppc_inst(PPC_INST_STD_LR)) { - pr_err("Unexpected instruction %08x around bl _mcount\n", op); + pr_err("Unexpected instruction %08x around bl _mcount\n", + ppc_inst_val(op)); return -EINVAL; } #else @@ -201,7 +202,7 @@ __ftrace_make_nop(struct module *mod, } if (op != ppc_inst(PPC_INST_LD_TOC)) { - pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op); + pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, ppc_inst_val(op)); return -EINVAL; } #endif /* CONFIG_MPROFILE_KERNEL */ @@ -229,7 +230,7 @@ __ftrace_make_nop(struct module *mod, /* Make sure that that this is still a 24bit jump */ if (!is_bl_op(op)) { - pr_err("Not expected bl: opcode is %x\n", op); + pr_err("Not expected bl: opcode is %x\n", ppc_inst_val(op)); return -EINVAL; } @@ -403,7 +404,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) /* Make sure that that this is still a 24bit jump */ if (!is_bl_op(op)) { - pr_err("Not expected bl: opcode is %x\n", op); + pr_err("Not expected bl: opcode is %x\n", ppc_inst_val(op)); return -EINVAL; } @@ -497,7 +498,8 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) * The load offset is different depending on the ABI. For simplicity * just mask it out when doing the compare. 
*/ - if (op0 != ppc_inst(0x48000008) || ((op1 & 0xffff0000) != 0xe8410000)) + if (op0 != ppc_inst(0x48000008) || + (ppc_inst_val(op1) & 0xffff0000) != 0xe8410000) return 0; return 1; } @@ -527,7 +529,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) if (!expected_nop_sequence(ip, op[0], op[1])) { pr_err("Unexpected call sequence at %p: %x %x\n", - ip, op[0], op[1]); + ip, ppc_inst_val(op[0]), ppc_inst_val(op[1])); return -EINVAL; } @@ -590,7 +592,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) /* It should be pointing to a nop */ if (op != ppc_inst(PPC_INST_NOP)) { - pr_err("Expected NOP but have %x\n", op); + pr_err("Expected NOP but have %x\n", ppc_inst_val(op)); return -EINVAL; } @@ -647,7 +649,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) } if (op != ppc_inst(PPC_INST_NOP)) { - pr_err("Unexpected call sequence at %p: %x\n", ip, op); + pr_err("Unexpected call sequence at %p: %x\n", ip, ppc_inst_val(op)); return -EINVAL; } @@ -726,7 +728,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, /* Make sure that that this is still a 24bit jump */ if (!is_bl_op(op)) { - pr_err("Not expected bl: opcode is %x\n", op); + pr_err("Not expected bl: opcode is %x\n", ppc_inst_val(op)); return -EINVAL; } diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c index 4acd3fb2b38e..1f5e3b4c8ae4 100644 --- a/arch/powerpc/kernel/vecemu.c +++ b/arch/powerpc/kernel/vecemu.c @@ -260,21 +260,23 @@ static unsigned int rfin(unsigned int x) int emulate_altivec(struct pt_regs *regs) { - unsigned int instr, i; + unsigned int instr, i, word; unsigned int va, vb, vc, vd; vector128 *vrs; if (get_user(instr, (unsigned int __user *) regs->nip)) return -EFAULT; - if ((instr >> 26) != 4) + + word = ppc_inst_val(instr); + if ((word >> 26) != 4) return -EINVAL; /* not an altivec instruction */ - vd = (instr >> 21) & 0x1f; - va = (instr >> 16) & 0x1f; - vb = (instr >> 11) & 0x1f; - vc = (instr >> 6) & 0x1f; + vd = (word >> 21) & 0x1f; + va = (word >> 16) & 0x1f; + vb = (word >> 11) & 0x1f; + vc = (word >> 6) & 0x1f; vrs = current->thread.vr_state.vr; - switch (instr & 0x3f) { + switch (word & 0x3f) { case 10: switch (vc) { case 0: /* vaddfp */ diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 6c30ddadd971..baa849b1a1f9 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -236,7 +236,7 @@ bool is_conditional_branch(unsigned int instr) if (opcode == 16) /* bc, bca, bcl, bcla */ return true; if (opcode == 19) { - switch ((instr >> 1) & 0x3ff) { + switch ((ppc_inst_val(instr) >> 1) & 0x3ff) { case 16: /* bclr, bclrl */ case 528: /* bcctr, bcctrl */ case 560: /* bctar, bctarl */ @@ -304,7 +304,7 @@ static int instr_is_branch_bform(unsigned int instr) int instr_is_relative_branch(unsigned int instr) { - if (instr & BRANCH_ABSOLUTE) + if (ppc_inst_val(instr) & BRANCH_ABSOLUTE) return 0; return instr_is_branch_iform(instr) || instr_is_branch_bform(instr); @@ -312,20 +312,20 @@ int instr_is_relative_branch(unsigned int instr) int instr_is_relative_link_branch(unsigned int instr) { - return instr_is_relative_branch(instr) && (instr & BRANCH_SET_LINK); + return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK); } static unsigned long branch_iform_target(const unsigned int *instr) { signed long imm; - imm = *instr & 0x3FFFFFC; + imm = ppc_inst_val(*instr) & 0x3FFFFFC; /* If the top bit of the immediate value is set this is 
negative */ if (imm & 0x2000000) imm -= 0x4000000; - if ((*instr & BRANCH_ABSOLUTE) == 0) + if ((ppc_inst_val(*instr) & BRANCH_ABSOLUTE) == 0) imm += (unsigned long)instr; return (unsigned long)imm; @@ -335,13 +335,13 @@ static unsigned long branch_bform_target(const unsigned int *instr) { signed long imm; - imm = *instr & 0xFFFC; + imm = ppc_inst_val(*instr) & 0xFFFC; /* If the top bit of the immediate value is set this is negative */ if (imm & 0x8000) imm -= 0x10000; - if ((*instr & BRANCH_ABSOLUTE) == 0) + if ((ppc_inst_val(*instr) & BRANCH_ABSOLUTE) == 0) imm += (unsigned long)instr; return (unsigned long)imm; @@ -373,9 +373,9 @@ int translate_branch(unsigned int *instr, const unsigned int *dest, target = branch_target(src); if (instr_is_branch_iform(*src)) - return create_branch(instr, dest, target, *src); + return create_branch(instr, dest, target, ppc_inst_val(*src)); else if (instr_is_branch_bform(*src)) - return create_cond_branch(instr, dest, target, *src); + return create_cond_branch(instr, dest, target, ppc_inst_val(*src)); return 1; } diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 5f3a7bd9d90d..14c93ee4ffc8 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1169,26 +1169,28 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, unsigned long int imm; unsigned long int val, val2; unsigned int mb, me, sh; + unsigned int word; long ival; + word = ppc_inst_val(instr); op->type = COMPUTE; opcode = instr >> 26; switch (opcode) { case 16: /* bc */ op->type = BRANCH; - imm = (signed short)(instr & 0xfffc); - if ((instr & 2) == 0) + imm = (signed short)(word & 0xfffc); + if ((word & 2) == 0) imm += regs->nip; op->val = truncate_if_32bit(regs->msr, imm); - if (instr & 1) + if (word & 1) op->type |= SETLK; - if (branch_taken(instr, regs, op)) + if (branch_taken(word, regs, op)) op->type |= BRTAKEN; return 1; #ifdef CONFIG_PPC64 case 17: /* sc */ - if ((instr & 0xfe2) == 2) + if ((word & 0xfe2) == 2) op->type = SYSCALL; else op->type = UNKNOWN; @@ -1196,21 +1198,21 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #endif case 18: /* b */ op->type = BRANCH | BRTAKEN; - imm = instr & 0x03fffffc; + imm = word & 0x03fffffc; if (imm & 0x02000000) imm -= 0x04000000; - if ((instr & 2) == 0) + if ((word & 2) == 0) imm += regs->nip; op->val = truncate_if_32bit(regs->msr, imm); - if (instr & 1) + if (word & 1) op->type |= SETLK; return 1; case 19: - switch ((instr >> 1) & 0x3ff) { + switch ((word >> 1) & 0x3ff) { case 0: /* mcrf */ op->type = COMPUTE + SETCC; - rd = 7 - ((instr >> 23) & 0x7); - ra = 7 - ((instr >> 18) & 0x7); + rd = 7 - ((word >> 23) & 0x7); + ra = 7 - ((word >> 18) & 0x7); rd *= 4; ra *= 4; val = (regs->ccr >> ra) & 0xf; @@ -1220,11 +1222,11 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 16: /* bclr */ case 528: /* bcctr */ op->type = BRANCH; - imm = (instr & 0x400)? regs->ctr: regs->link; + imm = (word & 0x400)? 
regs->ctr: regs->link; op->val = truncate_if_32bit(regs->msr, imm); - if (instr & 1) + if (word & 1) op->type |= SETLK; - if (branch_taken(instr, regs, op)) + if (branch_taken(word, regs, op)) op->type |= BRTAKEN; return 1; @@ -1247,23 +1249,23 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 417: /* crorc */ case 449: /* cror */ op->type = COMPUTE + SETCC; - ra = (instr >> 16) & 0x1f; - rb = (instr >> 11) & 0x1f; - rd = (instr >> 21) & 0x1f; + ra = (word >> 16) & 0x1f; + rb = (word >> 11) & 0x1f; + rd = (word >> 21) & 0x1f; ra = (regs->ccr >> (31 - ra)) & 1; rb = (regs->ccr >> (31 - rb)) & 1; - val = (instr >> (6 + ra * 2 + rb)) & 1; + val = (word >> (6 + ra * 2 + rb)) & 1; op->ccval = (regs->ccr & ~(1UL << (31 - rd))) | (val << (31 - rd)); return 1; } break; case 31: - switch ((instr >> 1) & 0x3ff) { + switch ((word >> 1) & 0x3ff) { case 598: /* sync */ op->type = BARRIER + BARRIER_SYNC; #ifdef __powerpc64__ - switch ((instr >> 21) & 3) { + switch ((word >> 21) & 3) { case 1: /* lwsync */ op->type = BARRIER + BARRIER_LWSYNC; break; @@ -1285,20 +1287,20 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, if (!FULL_REGS(regs)) return -1; - rd = (instr >> 21) & 0x1f; - ra = (instr >> 16) & 0x1f; - rb = (instr >> 11) & 0x1f; - rc = (instr >> 6) & 0x1f; + rd = (word >> 21) & 0x1f; + ra = (word >> 16) & 0x1f; + rb = (word >> 11) & 0x1f; + rc = (word >> 6) & 0x1f; switch (opcode) { #ifdef __powerpc64__ case 2: /* tdi */ - if (rd & trap_compare(regs->gpr[ra], (short) instr)) + if (rd & trap_compare(regs->gpr[ra], (short) word)) goto trap; return 1; #endif case 3: /* twi */ - if (rd & trap_compare((int)regs->gpr[ra], (short) instr)) + if (rd & trap_compare((int)regs->gpr[ra], (short) word)) goto trap; return 1; @@ -1307,7 +1309,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, if (!cpu_has_feature(CPU_FTR_ARCH_300)) return -1; - switch (instr & 0x3f) { + switch (word & 0x3f) { case 48: /* maddhd */ asm volatile(PPC_MADDHD(%0, %1, %2, %3) : "=r" (op->val) : "r" (regs->gpr[ra]), @@ -1335,16 +1337,16 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #endif case 7: /* mulli */ - op->val = regs->gpr[ra] * (short) instr; + op->val = regs->gpr[ra] * (short) word; goto compute_done; case 8: /* subfic */ - imm = (short) instr; + imm = (short) word; add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1); return 1; case 10: /* cmpli */ - imm = (unsigned short) instr; + imm = (unsigned short) word; val = regs->gpr[ra]; #ifdef __powerpc64__ if ((rd & 1) == 0) @@ -1354,7 +1356,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, return 1; case 11: /* cmpi */ - imm = (short) instr; + imm = (short) word; val = regs->gpr[ra]; #ifdef __powerpc64__ if ((rd & 1) == 0) @@ -1364,35 +1366,35 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, return 1; case 12: /* addic */ - imm = (short) instr; + imm = (short) word; add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); return 1; case 13: /* addic. 
*/ - imm = (short) instr; + imm = (short) word; add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); set_cr0(regs, op); return 1; case 14: /* addi */ - imm = (short) instr; + imm = (short) word; if (ra) imm += regs->gpr[ra]; op->val = imm; goto compute_done; case 15: /* addis */ - imm = ((short) instr) << 16; + imm = ((short) word) << 16; if (ra) imm += regs->gpr[ra]; op->val = imm; goto compute_done; case 19: - if (((instr >> 1) & 0x1f) == 2) { + if (((word >> 1) & 0x1f) == 2) { /* addpcis */ - imm = (short) (instr & 0xffc1); /* d0 + d2 fields */ - imm |= (instr >> 15) & 0x3e; /* d1 field */ + imm = (short) (word & 0xffc1); /* d0 + d2 fields */ + imm |= (word >> 15) & 0x3e; /* d1 field */ op->val = regs->nip + (imm << 16) + 4; goto compute_done; } @@ -1400,65 +1402,65 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, return 0; case 20: /* rlwimi */ - mb = (instr >> 6) & 0x1f; - me = (instr >> 1) & 0x1f; + mb = (word >> 6) & 0x1f; + me = (word >> 1) & 0x1f; val = DATA32(regs->gpr[rd]); imm = MASK32(mb, me); op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm); goto logical_done; case 21: /* rlwinm */ - mb = (instr >> 6) & 0x1f; - me = (instr >> 1) & 0x1f; + mb = (word >> 6) & 0x1f; + me = (word >> 1) & 0x1f; val = DATA32(regs->gpr[rd]); op->val = ROTATE(val, rb) & MASK32(mb, me); goto logical_done; case 23: /* rlwnm */ - mb = (instr >> 6) & 0x1f; - me = (instr >> 1) & 0x1f; + mb = (word >> 6) & 0x1f; + me = (word >> 1) & 0x1f; rb = regs->gpr[rb] & 0x1f; val = DATA32(regs->gpr[rd]); op->val = ROTATE(val, rb) & MASK32(mb, me); goto logical_done; case 24: /* ori */ - op->val = regs->gpr[rd] | (unsigned short) instr; + op->val = regs->gpr[rd] | (unsigned short) word; goto logical_done_nocc; case 25: /* oris */ - imm = (unsigned short) instr; + imm = (unsigned short) word; op->val = regs->gpr[rd] | (imm << 16); goto logical_done_nocc; case 26: /* xori */ - op->val = regs->gpr[rd] ^ (unsigned short) instr; + op->val = regs->gpr[rd] ^ (unsigned short) word; goto logical_done_nocc; case 27: /* xoris */ - imm = (unsigned short) instr; + imm = (unsigned short) word; op->val = regs->gpr[rd] ^ (imm << 16); goto logical_done_nocc; case 28: /* andi. */ - op->val = regs->gpr[rd] & (unsigned short) instr; + op->val = regs->gpr[rd] & (unsigned short) word; set_cr0(regs, op); goto logical_done_nocc; case 29: /* andis. 
*/ - imm = (unsigned short) instr; + imm = (unsigned short) word; op->val = regs->gpr[rd] & (imm << 16); set_cr0(regs, op); goto logical_done_nocc; #ifdef __powerpc64__ case 30: /* rld* */ - mb = ((instr >> 6) & 0x1f) | (instr & 0x20); + mb = ((word >> 6) & 0x1f) | (word & 0x20); val = regs->gpr[rd]; - if ((instr & 0x10) == 0) { - sh = rb | ((instr & 2) << 4); + if ((word & 0x10) == 0) { + sh = rb | ((word & 2) << 4); val = ROTATE(val, sh); - switch ((instr >> 2) & 3) { + switch ((word >> 2) & 3) { case 0: /* rldicl */ val &= MASK64_L(mb); break; @@ -1478,7 +1480,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, } else { sh = regs->gpr[rb] & 0x3f; val = ROTATE(val, sh); - switch ((instr >> 1) & 7) { + switch ((word >> 1) & 7) { case 0: /* rldcl */ op->val = val & MASK64_L(mb); goto logical_done; @@ -1493,8 +1495,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 31: /* isel occupies 32 minor opcodes */ - if (((instr >> 1) & 0x1f) == 15) { - mb = (instr >> 6) & 0x1f; /* bc field */ + if (((word >> 1) & 0x1f) == 15) { + mb = (word >> 6) & 0x1f; /* bc field */ val = (regs->ccr >> (31 - mb)) & 1; val2 = (ra) ? regs->gpr[ra] : 0; @@ -1502,7 +1504,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, goto compute_done; } - switch ((instr >> 1) & 0x3ff) { + switch ((word >> 1) & 0x3ff) { case 4: /* tw */ if (rd == 0x1f || (rd & trap_compare((int)regs->gpr[ra], @@ -1536,17 +1538,17 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->reg = rd; /* only MSR_EE and MSR_RI get changed if bit 15 set */ /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */ - imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL; + imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL; op->val = imm; return 0; #endif case 19: /* mfcr */ imm = 0xffffffffUL; - if ((instr >> 20) & 1) { + if ((word >> 20) & 1) { imm = 0xf0000000UL; for (sh = 0; sh < 8; ++sh) { - if (instr & (0x80000 >> sh)) + if (word & (0x80000 >> sh)) break; imm >>= 4; } @@ -1560,7 +1562,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, val = regs->gpr[rd]; op->ccval = regs->ccr; for (sh = 0; sh < 8; ++sh) { - if (instr & (0x80000 >> sh)) + if (word & (0x80000 >> sh)) op->ccval = (op->ccval & ~imm) | (val & imm); imm >>= 4; @@ -1568,7 +1570,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, return 1; case 339: /* mfspr */ - spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0); + spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0); op->type = MFSPR; op->reg = rd; op->spr = spr; @@ -1578,7 +1580,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, return 0; case 467: /* mtspr */ - spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0); + spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0); op->type = MTSPR; op->val = regs->gpr[rd]; op->spr = spr; @@ -1948,7 +1950,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 826: /* sradi with sh_5 = 0 */ case 827: /* sradi with sh_5 = 1 */ op->type = COMPUTE + SETREG + SETXER; - sh = rb | ((instr & 2) << 4); + sh = rb | ((word & 2) << 4); ival = (signed long int) regs->gpr[rd]; op->val = ival >> sh; op->xerval = regs->xer; @@ -1964,7 +1966,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, if (!cpu_has_feature(CPU_FTR_ARCH_300)) return -1; op->type = COMPUTE + SETREG; - sh = rb | ((instr & 2) << 4); + sh = rb | ((word & 2) << 4); val = (signed int) regs->gpr[rd]; if (sh) 
op->val = ROTATE(val, sh) & MASK64(0, 63 - sh); @@ -1979,34 +1981,34 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, */ case 54: /* dcbst */ op->type = MKOP(CACHEOP, DCBST, 0); - op->ea = xform_ea(instr, regs); + op->ea = xform_ea(word, regs); return 0; case 86: /* dcbf */ op->type = MKOP(CACHEOP, DCBF, 0); - op->ea = xform_ea(instr, regs); + op->ea = xform_ea(word, regs); return 0; case 246: /* dcbtst */ op->type = MKOP(CACHEOP, DCBTST, 0); - op->ea = xform_ea(instr, regs); + op->ea = xform_ea(word, regs); op->reg = rd; return 0; case 278: /* dcbt */ op->type = MKOP(CACHEOP, DCBTST, 0); - op->ea = xform_ea(instr, regs); + op->ea = xform_ea(word, regs); op->reg = rd; return 0; case 982: /* icbi */ op->type = MKOP(CACHEOP, ICBI, 0); - op->ea = xform_ea(instr, regs); + op->ea = xform_ea(word, regs); return 0; case 1014: /* dcbz */ op->type = MKOP(CACHEOP, DCBZ, 0); - op->ea = xform_ea(instr, regs); + op->ea = xform_ea(word, regs); return 0; } break; @@ -2019,14 +2021,14 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->update_reg = ra; op->reg = rd; op->val = regs->gpr[rd]; - u = (instr >> 20) & UPDATE; + u = (word >> 20) & UPDATE; op->vsx_flags = 0; switch (opcode) { case 31: - u = instr & UPDATE; - op->ea = xform_ea(instr, regs); - switch ((instr >> 1) & 0x3ff) { + u = word & UPDATE; + op->ea = xform_ea(word, regs); + switch ((word >> 1) & 0x3ff) { case 20: /* lwarx */ op->type = MKOP(LARX, 0, 4); break; @@ -2271,25 +2273,25 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #ifdef CONFIG_VSX case 12: /* lxsiwzx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 4); op->element_size = 8; break; case 76: /* lxsiwax */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, SIGNEXT, 4); op->element_size = 8; break; case 140: /* stxsiwx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 4); op->element_size = 8; break; case 268: /* lxvx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 16; op->vsx_flags = VSX_CHECK_VEC; @@ -2298,33 +2300,33 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 269: /* lxvl */ case 301: { /* lxvll */ int nb; - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->ea = ra ? regs->gpr[ra] : 0; nb = regs->gpr[rb] & 0xff; if (nb > 16) nb = 16; op->type = MKOP(LOAD_VSX, 0, nb); op->element_size = 16; - op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) | + op->vsx_flags = ((word & 0x20) ? 
VSX_LDLEFT : 0) | VSX_CHECK_VEC; break; } case 332: /* lxvdsx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 8); op->element_size = 8; op->vsx_flags = VSX_SPLAT; break; case 364: /* lxvwsx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 4); op->element_size = 4; op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC; break; case 396: /* stxvx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 16; op->vsx_flags = VSX_CHECK_VEC; @@ -2333,118 +2335,118 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 397: /* stxvl */ case 429: { /* stxvll */ int nb; - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->ea = ra ? regs->gpr[ra] : 0; nb = regs->gpr[rb] & 0xff; if (nb > 16) nb = 16; op->type = MKOP(STORE_VSX, 0, nb); op->element_size = 16; - op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) | + op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) | VSX_CHECK_VEC; break; } case 524: /* lxsspx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 4); op->element_size = 8; op->vsx_flags = VSX_FPCONV; break; case 588: /* lxsdx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 8); op->element_size = 8; break; case 652: /* stxsspx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 4); op->element_size = 8; op->vsx_flags = VSX_FPCONV; break; case 716: /* stxsdx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 8); op->element_size = 8; break; case 780: /* lxvw4x */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 4; break; case 781: /* lxsibzx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 1); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 812: /* lxvh8x */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 2; op->vsx_flags = VSX_CHECK_VEC; break; case 813: /* lxsihzx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 2); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 844: /* lxvd2x */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 8; break; case 876: /* lxvb16x */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 1; op->vsx_flags = VSX_CHECK_VEC; break; case 908: /* stxvw4x */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 4; break; case 909: /* stxsibx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 1); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 940: /* stxvh8x */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 2; op->vsx_flags = VSX_CHECK_VEC; break; case 941: /* stxsihx */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 2); 
op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 972: /* stxvd2x */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 8; break; case 1004: /* stxvb16x */ - op->reg = rd | ((instr & 1) << 5); + op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 1; op->vsx_flags = VSX_CHECK_VEC; @@ -2457,80 +2459,80 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 32: /* lwz */ case 33: /* lwzu */ op->type = MKOP(LOAD, u, 4); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; case 34: /* lbz */ case 35: /* lbzu */ op->type = MKOP(LOAD, u, 1); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; case 36: /* stw */ case 37: /* stwu */ op->type = MKOP(STORE, u, 4); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; case 38: /* stb */ case 39: /* stbu */ op->type = MKOP(STORE, u, 1); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; case 40: /* lhz */ case 41: /* lhzu */ op->type = MKOP(LOAD, u, 2); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; case 42: /* lha */ case 43: /* lhau */ op->type = MKOP(LOAD, SIGNEXT | u, 2); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; case 44: /* sth */ case 45: /* sthu */ op->type = MKOP(STORE, u, 2); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; case 46: /* lmw */ if (ra >= rd) break; /* invalid form, ra in range to load */ op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd)); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; case 47: /* stmw */ op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd)); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; #ifdef CONFIG_PPC_FPU case 48: /* lfs */ case 49: /* lfsu */ op->type = MKOP(LOAD_FP, u | FPCONV, 4); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; case 50: /* lfd */ case 51: /* lfdu */ op->type = MKOP(LOAD_FP, u, 8); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; case 52: /* stfs */ case 53: /* stfsu */ op->type = MKOP(STORE_FP, u | FPCONV, 4); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; case 54: /* stfd */ case 55: /* stfdu */ op->type = MKOP(STORE_FP, u, 8); - op->ea = dform_ea(instr, regs); + op->ea = dform_ea(word, regs); break; #endif @@ -2538,14 +2540,14 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 56: /* lq */ if (!((rd & 1) || (rd == ra))) op->type = MKOP(LOAD, 0, 16); - op->ea = dqform_ea(instr, regs); + op->ea = dqform_ea(word, regs); break; #endif #ifdef CONFIG_VSX case 57: /* lfdp, lxsd, lxssp */ - op->ea = dsform_ea(instr, regs); - switch (instr & 3) { + op->ea = dsform_ea(word, regs); + switch (word & 3) { case 0: /* lfdp */ if (rd & 1) break; /* reg must be even */ @@ -2569,8 +2571,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #ifdef __powerpc64__ case 58: /* ld[u], lwa */ - op->ea = dsform_ea(instr, regs); - switch (instr & 3) { + op->ea = dsform_ea(word, regs); + switch (word & 3) { case 0: /* ld */ op->type = MKOP(LOAD, 0, 8); break; @@ -2586,16 +2588,16 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #ifdef CONFIG_VSX case 61: /* stfdp, lxv, stxsd, stxssp, stxv */ - switch (instr & 7) { + switch (word & 7) { case 0: /* stfdp with LSB of DS field = 0 */ case 4: /* stfdp 
with LSB of DS field = 1 */ - op->ea = dsform_ea(instr, regs); + op->ea = dsform_ea(word, regs); op->type = MKOP(STORE_FP, 0, 16); break; case 1: /* lxv */ - op->ea = dqform_ea(instr, regs); - if (instr & 8) + op->ea = dqform_ea(word, regs); + if (word & 8) op->reg = rd + 32; op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 16; @@ -2604,7 +2606,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 2: /* stxsd with LSB of DS field = 0 */ case 6: /* stxsd with LSB of DS field = 1 */ - op->ea = dsform_ea(instr, regs); + op->ea = dsform_ea(word, regs); op->reg = rd + 32; op->type = MKOP(STORE_VSX, 0, 8); op->element_size = 8; @@ -2613,7 +2615,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 3: /* stxssp with LSB of DS field = 0 */ case 7: /* stxssp with LSB of DS field = 1 */ - op->ea = dsform_ea(instr, regs); + op->ea = dsform_ea(word, regs); op->reg = rd + 32; op->type = MKOP(STORE_VSX, 0, 4); op->element_size = 8; @@ -2621,8 +2623,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 5: /* stxv */ - op->ea = dqform_ea(instr, regs); - if (instr & 8) + op->ea = dqform_ea(word, regs); + if (word & 8) op->reg = rd + 32; op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 16; @@ -2634,8 +2636,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #ifdef __powerpc64__ case 62: /* std[u] */ - op->ea = dsform_ea(instr, regs); - switch (instr & 3) { + op->ea = dsform_ea(word, regs); + switch (word & 3) { case 0: /* std */ op->type = MKOP(STORE, 0, 8); break; @@ -2663,7 +2665,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, return 0; logical_done: - if (instr & 1) + if (word & 1) set_cr0(regs, op); logical_done_nocc: op->reg = ra; @@ -2671,7 +2673,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, return 1; arith_done: - if (instr & 1) + if (word & 1) set_cr0(regs, op); compute_done: op->reg = rd; diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 85d62f16d07a..b928b21feac1 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -847,12 +847,12 @@ static int __init emulate_compute_instr(struct pt_regs *regs, { struct instruction_op op; - if (!regs || !instr) + if (!regs || !ppc_inst_val(instr)) return -EINVAL; if (analyse_instr(&op, regs, instr) != 1 || GETTYPE(op.type) != COMPUTE) { - pr_info("emulation failed, instruction = 0x%08x\n", instr); + pr_info("emulation failed, instruction = 0x%08x\n", ppc_inst_val(instr)); return -EFAULT; } @@ -866,13 +866,13 @@ static int __init execute_compute_instr(struct pt_regs *regs, extern int exec_instr(struct pt_regs *regs); extern s32 patch__exec_instr; - if (!regs || !instr) + if (!regs || !ppc_inst_val(instr)) return -EINVAL; /* Patch the NOP with the actual instruction */ patch_instruction_site(&patch__exec_instr, instr); if (exec_instr(regs)) { - pr_info("execution failed, instruction = 0x%08x\n", instr); + pr_info("execution failed, instruction = 0x%08x\n", ppc_inst_val(instr)); return -EFAULT; } diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 44457bae77a0..cec8f7e46941 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -49,7 +49,7 @@ static bool store_updates_sp(unsigned int inst) { /* check for 1 in the rA field */ - if (((inst >> 16) & 0x1f) != 1) + if (((ppc_inst_val(inst) >> 16) & 0x1f) != 1) return false; /* check major opcode */ switch (inst >> 
26) {
@@ -60,10 +60,10 @@ static bool store_updates_sp(unsigned int inst)
 	case OP_STFDU:
 		return true;
 	case OP_STD:	/* std or stdu */
-		return (inst & 3) == 1;
+		return (ppc_inst_val(inst) & 3) == 1;
 	case OP_31:
 		/* check minor opcode */
-		switch ((inst >> 1) & 0x3ff) {
+		switch ((ppc_inst_val(inst) >> 1) & 0x3ff) {
 		case OP_31_XOP_STDUX:
 		case OP_31_XOP_STWUX:
 		case OP_31_XOP_STBUX:
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a56dcb004396..c5e4218716e4 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2872,9 +2872,9 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
 		dotted = 0;
 		last_inst = inst;
 		if (praddr)
-			printf(REG" %.8x", adr, inst);
+			printf(REG" %.8x", adr, ppc_inst_val(inst));
 		printf("\t");
-		dump_func(inst, adr);
+		dump_func(ppc_inst_val(inst), adr);
 		printf("\n");
 	}
 	return adr - first_adr;
-- cgit v1.2.3-59-g8ed1b


From 8094892d1aff14269d3b7bfcd8b941217eecd81f Mon Sep 17 00:00:00 2001
From: Jordan Niethe
Date: Wed, 6 May 2020 13:40:28 +1000
Subject: powerpc: Use a function for getting the instruction op code

In preparation for using a data type for instructions that cannot be
directly used with the '>>' operator, use a function for getting the
op code of an instruction.
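As a rough sketch of the intended style (a hypothetical caller, not
taken from the tree), a primary opcode check becomes:

	/* Illustrative only: 32 is the primary opcode of lwz. */
	static bool is_lwz_insn(u32 instr)
	{
		return ppc_inst_primary_opcode(instr) == 32;
	}

rather than open-coding the "instr >> 26" shift at every call site.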
Signed-off-by: Jordan Niethe
Reviewed-by: Alistair Popple
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200506034050.24806-9-jniethe5@gmail.com
---
 arch/powerpc/include/asm/inst.h  | 5 +++++
 arch/powerpc/kernel/align.c      | 2 +-
 arch/powerpc/kernel/vecemu.c     | 3 ++-
 arch/powerpc/lib/code-patching.c | 4 ++--
 arch/powerpc/lib/sstep.c         | 2 +-
 arch/powerpc/mm/fault.c          | 3 ++-
 6 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
index e8e436fbbbf6..09b4ec7debc1 100644
--- a/arch/powerpc/include/asm/inst.h
+++ b/arch/powerpc/include/asm/inst.h
@@ -13,4 +13,9 @@ static inline u32 ppc_inst_val(u32 x)
 	return x;
 }
 
+static inline int ppc_inst_primary_opcode(u32 x)
+{
+	return ppc_inst_val(x) >> 26;
+}
+
 #endif /* _ASM_POWERPC_INST_H */
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 44921001f84a..47dbba81a227 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -314,7 +314,7 @@ int fix_alignment(struct pt_regs *regs)
 	}
 
 #ifdef CONFIG_SPE
-	if ((ppc_inst_val(instr) >> 26) == 0x4) {
+	if (ppc_inst_primary_opcode(instr) == 0x4) {
 		int reg = (ppc_inst_val(instr) >> 21) & 0x1f;
 		PPC_WARN_ALIGNMENT(spe, regs);
 		return emulate_spe(regs, reg, instr);
diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
index 1f5e3b4c8ae4..a544590b90e5 100644
--- a/arch/powerpc/kernel/vecemu.c
+++ b/arch/powerpc/kernel/vecemu.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <asm/inst.h>
 
 /* Functions in vector.S */
 extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
@@ -268,7 +269,7 @@ int emulate_altivec(struct pt_regs *regs)
 		return -EFAULT;
 
 	word = ppc_inst_val(instr);
-	if ((word >> 26) != 4)
+	if (ppc_inst_primary_opcode(instr) != 4)
 		return -EINVAL;	/* not an altivec instruction */
 	vd = (word >> 21) & 0x1f;
 	va = (word >> 16) & 0x1f;
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index baa849b1a1f9..f5c6dcbac44b 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -231,7 +231,7 @@ bool is_offset_in_branch_range(long offset)
  */
 bool is_conditional_branch(unsigned int instr)
 {
-	unsigned int opcode = instr >> 26;
+	unsigned int opcode = ppc_inst_primary_opcode(instr);
 
 	if (opcode == 16)	/* bc, bca, bcl, bcla */
 		return true;
 	if (opcode == 19) {
@@ -289,7 +289,7 @@ int create_cond_branch(unsigned int *instr, const unsigned int *addr,
 
 static unsigned int branch_opcode(unsigned int instr)
 {
-	return (instr >> 26) & 0x3F;
+	return ppc_inst_primary_opcode(instr) & 0x3F;
 }
 
 static int instr_is_branch_iform(unsigned int instr)
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 14c93ee4ffc8..7f7be154da7e 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1175,7 +1175,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 	word = ppc_inst_val(instr);
 	op->type = COMPUTE;
 
-	opcode = instr >> 26;
+	opcode = ppc_inst_primary_opcode(instr);
 	switch (opcode) {
 	case 16:	/* bc */
 		op->type = BRANCH;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index cec8f7e46941..2c23c3076b1e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include <asm/inst.h>
 
 /*
  * Check whether the instruction inst is a store using
@@ -52,7 +53,7 @@ static bool store_updates_sp(unsigned int inst)
 	if (((ppc_inst_val(inst) >> 16) & 0x1f) != 1)
 		return false;
 	/* check major opcode */
-	switch (inst >> 26) {
+	switch (ppc_inst_primary_opcode(inst)) {
 	case OP_STWU:
 	case OP_STBU:
 	case OP_STHU:
-- cgit v1.2.3-59-g8ed1b


From aabd2233b6aefeee6d7a2f667076d8346be1d30a Mon Sep 17 00:00:00 2001
From: Jordan Niethe
Date: Wed, 6 May 2020 13:40:29 +1000
Subject: powerpc: Use a function for byte swapping instructions

Use a function for byte swapping instructions, in preparation for a
more complicated instruction type.

Signed-off-by: Jordan Niethe
Signed-off-by: Michael Ellerman
Reviewed-by: Balamuruhan S
Link: https://lore.kernel.org/r/20200506034050.24806-10-jniethe5@gmail.com
---
 arch/powerpc/include/asm/inst.h | 5 +++++
 arch/powerpc/kernel/align.c     | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
index 09b4ec7debc1..ff2c8fe3e262 100644
--- a/arch/powerpc/include/asm/inst.h
+++ b/arch/powerpc/include/asm/inst.h
@@ -18,4 +18,9 @@ static inline int ppc_inst_primary_opcode(u32 x)
 	return ppc_inst_val(x) >> 26;
 }
 
+static inline u32 ppc_inst_swab(u32 x)
+{
+	return ppc_inst(swab32(ppc_inst_val(x)));
+}
+
 #endif /* _ASM_POWERPC_INST_H */
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 47dbba81a227..a63216da8cf1 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -310,7 +310,7 @@ int fix_alignment(struct pt_regs *regs)
 		/* We don't handle PPC little-endian any more... */
 		if (cpu_has_feature(CPU_FTR_PPC_LE))
 			return -EIO;
-		instr = swab32(instr);
+		instr = ppc_inst_swab(instr);
 	}
 
 #ifdef CONFIG_SPE
-- cgit v1.2.3-59-g8ed1b


From 217862d9b98bf08958d57fd7b31b9de0f1a9477d Mon Sep 17 00:00:00 2001
From: Jordan Niethe
Date: Wed, 6 May 2020 13:40:30 +1000
Subject: powerpc: Introduce functions for instruction equality

In preparation for an instruction data type that cannot be directly
used with the '==' operator, use functions for checking equality.
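A minimal sketch of the helper in use (hypothetical caller, not taken
from the tree), assuming PPC_INST_NOP from ppc-opcode.h:

	/* Illustrative only: check whether a site currently holds a
	 * nop, without comparing instructions with '=='.
	 */
	static bool site_is_nop(u32 *addr)
	{
		return ppc_inst_equal(*addr, ppc_inst(PPC_INST_NOP));
	}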
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Reviewed-by: Balamuruhan S Link: https://lore.kernel.org/r/20200506034050.24806-11-jniethe5@gmail.com --- arch/powerpc/include/asm/inst.h | 5 +++++ arch/powerpc/kernel/trace/ftrace.c | 15 ++++++++------- arch/powerpc/lib/code-patching.c | 12 ++++++------ arch/powerpc/xmon/xmon.c | 4 ++-- 4 files changed, 21 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index ff2c8fe3e262..ff8d58671648 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -23,4 +23,9 @@ static inline u32 ppc_inst_swab(u32 x) return ppc_inst(swab32(ppc_inst_val(x))); } +static inline bool ppc_inst_equal(u32 x, u32 y) +{ + return x == y; +} + #endif /* _ASM_POWERPC_INST_H */ diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index cc23c63f3769..cbb19af4a72a 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -72,7 +72,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) return -EFAULT; /* Make sure it is what we expect it to be */ - if (replaced != old) { + if (!ppc_inst_equal(replaced, old)) { pr_err("%p: replaced (%#x) != old (%#x)", (void *)ip, ppc_inst_val(replaced), ppc_inst_val(old)); return -EINVAL; @@ -170,7 +170,8 @@ __ftrace_make_nop(struct module *mod, } /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */ - if (op != ppc_inst(PPC_INST_MFLR) && op != ppc_inst(PPC_INST_STD_LR)) { + if (!ppc_inst_equal(op, ppc_inst(PPC_INST_MFLR)) && + !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) { pr_err("Unexpected instruction %08x around bl _mcount\n", ppc_inst_val(op)); return -EINVAL; @@ -201,7 +202,7 @@ __ftrace_make_nop(struct module *mod, return -EFAULT; } - if (op != ppc_inst(PPC_INST_LD_TOC)) { + if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) { pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, ppc_inst_val(op)); return -EINVAL; } @@ -498,7 +499,7 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) * The load offset is different depending on the ABI. For simplicity * just mask it out when doing the compare. 
*/ - if (op0 != ppc_inst(0x48000008) || + if (!ppc_inst_equal(op0, ppc_inst(0x48000008)) || (ppc_inst_val(op1) & 0xffff0000) != 0xe8410000) return 0; return 1; @@ -508,7 +509,7 @@ static int expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) { /* look for patched "NOP" on ppc64 with -mprofile-kernel */ - if (op0 != ppc_inst(PPC_INST_NOP)) + if (!ppc_inst_equal(op0, ppc_inst(PPC_INST_NOP))) return 0; return 1; } @@ -591,7 +592,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return -EFAULT; /* It should be pointing to a nop */ - if (op != ppc_inst(PPC_INST_NOP)) { + if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) { pr_err("Expected NOP but have %x\n", ppc_inst_val(op)); return -EINVAL; } @@ -648,7 +649,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) return -EFAULT; } - if (op != ppc_inst(PPC_INST_NOP)) { + if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) { pr_err("Unexpected call sequence at %p: %x\n", ip, ppc_inst_val(op)); return -EINVAL; } diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index f5c6dcbac44b..d298bb16936e 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -479,7 +479,7 @@ static void __init test_branch_iform(void) /* Check flags are masked correctly */ err = create_branch(&instr, &instr, addr, 0xFFFFFFFC); check(instr_is_branch_to_addr(&instr, addr)); - check(instr == ppc_inst(0x48000000)); + check(ppc_inst_equal(instr, ppc_inst(0x48000000))); } static void __init test_create_function_call(void) @@ -564,7 +564,7 @@ static void __init test_branch_bform(void) /* Check flags are masked correctly */ err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC); check(instr_is_branch_to_addr(&instr, addr)); - check(instr == ppc_inst(0x43FF0000)); + check(ppc_inst_equal(instr, ppc_inst(0x43FF0000))); } static void __init test_translate_branch(void) @@ -598,7 +598,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(*q == ppc_inst(0x4a000000)); + check(ppc_inst_equal(*q, ppc_inst(0x4a000000))); /* Maximum positive case, move x to x - 32 MB + 4 */ p = buf + 0x2000000; @@ -609,7 +609,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(*q == ppc_inst(0x49fffffc)); + check(ppc_inst_equal(*q, ppc_inst(0x49fffffc))); /* Jump to x + 16 MB moved to x + 20 MB */ p = buf; @@ -655,7 +655,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(*q == ppc_inst(0x43ff8000)); + check(ppc_inst_equal(*q, ppc_inst(0x43ff8000))); /* Maximum positive case, move x to x - 32 KB + 4 */ p = buf + 0x8000; @@ -667,7 +667,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(*q == ppc_inst(0x43ff7ffc)); + check(ppc_inst_equal(*q, ppc_inst(0x43ff7ffc))); /* Jump to x + 12 KB moved to x + 20 KB */ p = buf; diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index c5e4218716e4..4cf998518047 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -947,7 +947,7 @@ static void remove_bpts(void) if ((bp->enabled & (BP_TRAP|BP_CIABR)) != BP_TRAP) continue; if (mread(bp->address, &instr, 4) 
== 4
-		    && instr == ppc_inst(bpinstr)
+		    && ppc_inst_equal(instr, ppc_inst(bpinstr))
 		    && patch_instruction(
 			(unsigned int *)bp->address, bp->instr[0]) != 0)
 			printf("Couldn't remove breakpoint at %lx\n",
@@ -2862,7 +2862,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
 			break;
 		}
 		inst = ppc_inst(GETWORD(val));
-		if (adr > first_adr && inst == last_inst) {
+		if (adr > first_adr && ppc_inst_equal(inst, last_inst)) {
 			if (!dotted) {
 				printf(" ...\n");
 				dotted = 1;
-- cgit v1.2.3-59-g8ed1b


From 94afd069d937d84fb4f696eb9a78db4084e43d21 Mon Sep 17 00:00:00 2001
From: Jordan Niethe
Date: Wed, 6 May 2020 13:40:31 +1000
Subject: powerpc: Use a datatype for instructions

Currently unsigned ints are used to represent instructions on powerpc.
This has worked well as instructions have always been 4-byte words.

However, ISA v3.1 introduces a change to instructions that means this
scheme will no longer work as well: prefixed instructions. A prefixed
instruction is made up of a word prefix followed by a word suffix,
making an 8-byte doubleword instruction. No matter the endianness of
the system, the prefix always comes first. Prefixed instructions are
only planned for powerpc64.

Introduce a ppc_inst type to represent both prefixed and word
instructions on powerpc64 while keeping it possible to exclusively
have word instructions on powerpc32.
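A minimal sketch of the intended usage under the new type (hypothetical,
assuming only the inst.h helpers above): code always goes through the
accessors rather than casting, so a future 8-byte prefixed
representation only needs changes in the helpers.

	/* Illustrative only: 0x60000000 is a nop (ori r0,r0,0),
	 * so its primary opcode is 24 (ori).
	 */
	struct ppc_inst insn = ppc_inst(0x60000000);

	if (ppc_inst_primary_opcode(insn) == 24)
		pr_info("word insn: %08x\n", ppc_inst_val(insn));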
Signed-off-by: Jordan Niethe
[mpe: Fix compile error in emulate_spe()]
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200506034050.24806-12-jniethe5@gmail.com
---
 arch/powerpc/include/asm/code-patching.h  | 32 ++++++------
 arch/powerpc/include/asm/inst.h           | 18 ++++---
 arch/powerpc/include/asm/sstep.h          |  5 +-
 arch/powerpc/include/asm/uprobes.h        |  5 +-
 arch/powerpc/kernel/align.c               |  9 ++--
 arch/powerpc/kernel/crash_dump.c          |  2 +-
 arch/powerpc/kernel/epapr_paravirt.c      |  6 +--
 arch/powerpc/kernel/hw_breakpoint.c       |  4 +-
 arch/powerpc/kernel/jump_label.c          |  2 +-
 arch/powerpc/kernel/kgdb.c                |  4 +-
 arch/powerpc/kernel/kprobes.c             |  8 +--
 arch/powerpc/kernel/mce_power.c           |  5 +-
 arch/powerpc/kernel/optprobes.c           | 64 ++++++++++++++----------
 arch/powerpc/kernel/setup_32.c            |  4 +-
 arch/powerpc/kernel/trace/ftrace.c        | 83 ++++++++++++++++---------------
 arch/powerpc/kernel/vecemu.c              |  5 +-
 arch/powerpc/lib/code-patching.c          | 76 ++++++++++++++--------------
 arch/powerpc/lib/feature-fixups.c         | 62 ++++++++++++-----------
 arch/powerpc/lib/sstep.c                  |  4 +-
 arch/powerpc/lib/test_emulate_step.c      |  9 ++--
 arch/powerpc/mm/fault.c                   |  4 +-
 arch/powerpc/perf/core-book3s.c           |  4 +-
 arch/powerpc/platforms/86xx/mpc86xx_smp.c |  4 +-
 arch/powerpc/platforms/powermac/smp.c     |  4 +-
 arch/powerpc/xmon/xmon.c                  | 22 ++++----
 arch/powerpc/xmon/xmon_bpts.h             |  6 +--
 26 files changed, 237 insertions(+), 214 deletions(-)

diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 48e021957ee5..eacc9102c251 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -23,33 +23,33 @@
 #define BRANCH_ABSOLUTE	0x2
 
 bool is_offset_in_branch_range(long offset);
-int create_branch(unsigned int *instr, const unsigned int *addr,
+int create_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
 		  unsigned long target, int flags);
-int create_cond_branch(unsigned int *instr, const unsigned int *addr,
+int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
 		       unsigned long target, int flags);
-int patch_branch(unsigned int *addr, unsigned long target, int flags);
-int patch_instruction(unsigned int *addr, unsigned int instr);
-int raw_patch_instruction(unsigned int *addr, unsigned int instr);
+int patch_branch(struct ppc_inst *addr, unsigned long target, int flags);
+int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
+int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
 
 static inline unsigned long patch_site_addr(s32 *site)
 {
 	return (unsigned long)site + *site;
 }
 
-static inline int patch_instruction_site(s32 *site, unsigned int instr)
+static inline int patch_instruction_site(s32 *site, struct ppc_inst instr)
 {
-	return patch_instruction((unsigned int *)patch_site_addr(site), instr);
+	return patch_instruction((struct ppc_inst *)patch_site_addr(site), instr);
 }
 
 static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
 {
-	return patch_branch((unsigned int *)patch_site_addr(site), target, flags);
+	return patch_branch((struct ppc_inst *)patch_site_addr(site), target, flags);
 }
 
 static inline int modify_instruction(unsigned int *addr, unsigned int clr,
 				     unsigned int set)
 {
-	return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
+	return patch_instruction((struct ppc_inst *)addr, ppc_inst((*addr & ~clr) | set));
 }
 
 static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
@@ -57,13 +57,13 @@ static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned
 	return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
 }
 
-int instr_is_relative_branch(unsigned int instr);
-int instr_is_relative_link_branch(unsigned int instr);
-int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
-unsigned long branch_target(const unsigned int *instr);
-int translate_branch(unsigned int *instr, const unsigned int *dest,
-		     const unsigned int *src);
-extern bool is_conditional_branch(unsigned int instr);
+int instr_is_relative_branch(struct ppc_inst instr);
+int instr_is_relative_link_branch(struct ppc_inst instr);
+int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr);
+unsigned long branch_target(const struct ppc_inst *instr);
+int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest,
+		     const struct ppc_inst *src);
+extern bool is_conditional_branch(struct ppc_inst instr);
 #ifdef CONFIG_PPC_BOOK3E_64
 void __patch_exception(int exc, unsigned long addr);
 #define patch_exception(exc, name) do { \
diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
index ff8d58671648..f602ca908936 100644
--- a/arch/powerpc/include/asm/inst.h
+++ b/arch/powerpc/include/asm/inst.h
@@ -6,26 +6,30 @@
  * Instruction data type for POWER
  */
 
-#define ppc_inst(x) (x)
+struct ppc_inst {
+	u32 val;
+} __packed;
+
+#define ppc_inst(x) ((struct ppc_inst){ .val = x })
+
-static inline u32 ppc_inst_val(u32 x)
+static inline u32 ppc_inst_val(struct ppc_inst x)
 {
-	return x;
+	return x.val;
 }
 
-static inline int ppc_inst_primary_opcode(u32 x)
+static inline int ppc_inst_primary_opcode(struct ppc_inst x)
 {
 	return ppc_inst_val(x) >> 26;
 }
 
-static inline u32 ppc_inst_swab(u32 x)
+static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
 {
 	return ppc_inst(swab32(ppc_inst_val(x)));
 }
 
-static inline bool ppc_inst_equal(u32 x, u32 y)
+static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
 {
-	return x == y;
+	return ppc_inst_val(x) == ppc_inst_val(y);
 }
 
 #endif /* _ASM_POWERPC_INST_H */
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index 26d729562fe2..c3ce903ac488 100644
---
a/arch/powerpc/include/asm/sstep.h +++ b/arch/powerpc/include/asm/sstep.h @@ -2,6 +2,7 @@ /* * Copyright (C) 2004 Paul Mackerras , IBM */ +#include struct pt_regs; @@ -132,7 +133,7 @@ union vsx_reg { * otherwise. */ extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, - unsigned int instr); + struct ppc_inst instr); /* * Emulate an instruction that can be executed just by updating @@ -149,7 +150,7 @@ void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op); * 0 if it could not be emulated, or -1 for an instruction that * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.). */ -extern int emulate_step(struct pt_regs *regs, unsigned int instr); +extern int emulate_step(struct pt_regs *regs, struct ppc_inst instr); /* * Emulate a load or store instruction by reading/writing the diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h index 2bbdf27d09b5..7e3b329ba2d3 100644 --- a/arch/powerpc/include/asm/uprobes.h +++ b/arch/powerpc/include/asm/uprobes.h @@ -11,6 +11,7 @@ #include #include +#include typedef ppc_opcode_t uprobe_opcode_t; @@ -23,8 +24,8 @@ typedef ppc_opcode_t uprobe_opcode_t; struct arch_uprobe { union { - u32 insn; - u32 ixol; + struct ppc_inst insn; + struct ppc_inst ixol; }; }; diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index a63216da8cf1..9b35d6160507 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -105,7 +105,7 @@ static struct aligninfo spe_aligninfo[32] = { * so we don't need the address swizzling. */ static int emulate_spe(struct pt_regs *regs, unsigned int reg, - unsigned int instr) + struct ppc_inst ppc_instr) { int ret; union { @@ -116,8 +116,9 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, } data, temp; unsigned char __user *p, *addr; unsigned long *evr = ¤t->thread.evr[reg]; - unsigned int nb, flags; + unsigned int nb, flags, instr; + instr = ppc_inst_val(ppc_instr); instr = (instr >> 1) & 0x1f; /* DAR has the operand effective address */ @@ -294,7 +295,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, int fix_alignment(struct pt_regs *regs) { - unsigned int instr; + struct ppc_inst instr; struct instruction_op op; int r, type; @@ -304,7 +305,7 @@ int fix_alignment(struct pt_regs *regs) */ CHECK_FULL_REGS(regs); - if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip))) + if (unlikely(__get_user(instr.val, (unsigned int __user *)regs->nip))) return -EFAULT; if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) { /* We don't handle PPC little-endian any more... */ diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 78e556b131db..72bafb47e757 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -35,7 +35,7 @@ void __init reserve_kdump_trampoline(void) static void __init create_trampoline(unsigned long addr) { - unsigned int *p = (unsigned int *)addr; + struct ppc_inst *p = (struct ppc_inst *)addr; /* The maximum range of a single instruction branch, is the current * instruction's address + (32 MB - 4) bytes. 
For the trampoline we diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index e8eb72a65572..2ed14d4a47f5 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -37,10 +37,10 @@ static int __init early_init_dt_scan_epapr(unsigned long node, return -1; for (i = 0; i < (len / 4); i++) { - u32 inst = ppc_inst(be32_to_cpu(insts[i])); - patch_instruction(epapr_hypercall_start + i, inst); + struct ppc_inst inst = ppc_inst(be32_to_cpu(insts[i])); + patch_instruction((struct ppc_inst *)(epapr_hypercall_start + i), inst); #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) - patch_instruction(epapr_ev_idle_start + i, inst); + patch_instruction((struct ppc_inst *)(epapr_ev_idle_start + i), inst); #endif } diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 46e09ac8b84a..2db9a7ac7bcb 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -244,12 +244,12 @@ dar_range_overlaps(unsigned long dar, int size, struct arch_hw_breakpoint *info) static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp, struct arch_hw_breakpoint *info) { - unsigned int instr = ppc_inst(0); + struct ppc_inst instr = ppc_inst(0); int ret, type, size; struct instruction_op op; unsigned long addr = info->address; - if (__get_user_inatomic(instr, (unsigned int *)regs->nip)) + if (__get_user_inatomic(instr.val, (unsigned int *)regs->nip)) goto fail; ret = analyse_instr(&op, regs, instr); diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c index daa4afce7ec8..144858027fa3 100644 --- a/arch/powerpc/kernel/jump_label.c +++ b/arch/powerpc/kernel/jump_label.c @@ -11,7 +11,7 @@ void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { - u32 *addr = (u32 *)(unsigned long)entry->code; + struct ppc_inst *addr = (struct ppc_inst *)(unsigned long)entry->code; if (type == JUMP_LABEL_JMP) patch_branch(addr, entry->target, 0); diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index a6b38a19133f..652b2852bea3 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -419,7 +419,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) { int err; unsigned int instr; - unsigned int *addr = (unsigned int *)bpt->bpt_addr; + struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr; err = probe_kernel_address(addr, instr); if (err) @@ -438,7 +438,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) { int err; unsigned int instr = *(unsigned int *)bpt->saved_instr; - unsigned int *addr = (unsigned int *)bpt->bpt_addr; + struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr; err = patch_instruction(addr, ppc_inst(instr)); if (err) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 92fa3070d905..a08ae5803622 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -106,7 +106,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset) int arch_prepare_kprobe(struct kprobe *p) { int ret = 0; - kprobe_opcode_t insn = *p->addr; + struct ppc_inst insn = *(struct ppc_inst *)p->addr; if ((unsigned long)p->addr & 0x03) { printk("Attempt to register kprobe at an unaligned address\n"); @@ -139,13 +139,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe); void arch_arm_kprobe(struct kprobe *p) { - patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION)); + patch_instruction((struct ppc_inst *)p->addr, 
ppc_inst(BREAKPOINT_INSTRUCTION)); } NOKPROBE_SYMBOL(arch_arm_kprobe); void arch_disarm_kprobe(struct kprobe *p) { - patch_instruction(p->addr, ppc_inst(p->opcode)); + patch_instruction((struct ppc_inst *)p->addr, ppc_inst(p->opcode)); } NOKPROBE_SYMBOL(arch_disarm_kprobe); @@ -217,7 +217,7 @@ NOKPROBE_SYMBOL(arch_prepare_kretprobe); static int try_to_emulate(struct kprobe *p, struct pt_regs *regs) { int ret; - unsigned int insn = *p->ainsn.insn; + struct ppc_inst insn = *(struct ppc_inst *)p->ainsn.insn; /* regs->nip is also adjusted if emulate_step returns 1 */ ret = emulate_step(regs, insn); diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index 1d18991f3854..08b355f80d9e 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -20,6 +20,7 @@ #include #include #include +#include /* * Convert an address related to an mm to a PFN. NOTE: we are in real @@ -369,7 +370,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr, * in real-mode is tricky and can lead to recursive * faults */ - int instr; + struct ppc_inst instr; unsigned long pfn, instr_addr; struct instruction_op op; struct pt_regs tmp = *regs; @@ -377,7 +378,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr, pfn = addr_to_pfn(regs, regs->nip); if (pfn != ULONG_MAX) { instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK); - instr = *(unsigned int *)(instr_addr); + instr = *(struct ppc_inst *)(instr_addr); if (!analyse_instr(&op, &tmp, instr)) { pfn = addr_to_pfn(regs, op.ea); *addr = op.ea; diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 44006c4ca4f1..5a71fef71c22 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -100,8 +100,9 @@ static unsigned long can_optimize(struct kprobe *p) * Ensure that the instruction is not a conditional branch, * and that can be emulated. 
*/ - if (!is_conditional_branch(*p->ainsn.insn) && - analyse_instr(&op, ®s, *p->ainsn.insn) == 1) { + if (!is_conditional_branch(*(struct ppc_inst *)p->ainsn.insn) && + analyse_instr(&op, ®s, + *(struct ppc_inst *)p->ainsn.insn) == 1) { emulate_update_regs(®s, &op); nip = regs.nip; } @@ -148,13 +149,15 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op) void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr) { /* addis r4,0,(insn)@h */ - patch_instruction(addr, ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) | - ((val >> 16) & 0xffff))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) | + ((val >> 16) & 0xffff))); addr++; /* ori r4,r4,(insn)@l */ - patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(4) | - ___PPC_RS(4) | (val & 0xffff))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_ORI | ___PPC_RA(4) | + ___PPC_RS(4) | (val & 0xffff))); } /* @@ -164,34 +167,39 @@ void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr) void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr) { /* lis r3,(op)@highest */ - patch_instruction(addr, ppc_inst(PPC_INST_ADDIS | ___PPC_RT(3) | - ((val >> 48) & 0xffff))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_ADDIS | ___PPC_RT(3) | + ((val >> 48) & 0xffff))); addr++; /* ori r3,r3,(op)@higher */ - patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | - ___PPC_RS(3) | ((val >> 32) & 0xffff))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | + ___PPC_RS(3) | ((val >> 32) & 0xffff))); addr++; /* rldicr r3,r3,32,31 */ - patch_instruction(addr, ppc_inst(PPC_INST_RLDICR | ___PPC_RA(3) | - ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_RLDICR | ___PPC_RA(3) | + ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31))); addr++; /* oris r3,r3,(op)@h */ - patch_instruction(addr, ppc_inst(PPC_INST_ORIS | ___PPC_RA(3) | - ___PPC_RS(3) | ((val >> 16) & 0xffff))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_ORIS | ___PPC_RA(3) | + ___PPC_RS(3) | ((val >> 16) & 0xffff))); addr++; /* ori r3,r3,(op)@l */ - patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | - ___PPC_RS(3) | (val & 0xffff))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | + ___PPC_RS(3) | (val & 0xffff))); } int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) { - kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step; - kprobe_opcode_t *op_callback_addr, *emulate_step_addr; + struct ppc_inst branch_op_callback, branch_emulate_step; + kprobe_opcode_t *op_callback_addr, *emulate_step_addr, *buff; long b_offset; unsigned long nip, size; int rc, i; @@ -231,7 +239,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int); pr_devel("Copying template to %p, size %lu\n", buff, size); for (i = 0; i < size; i++) { - rc = patch_instruction(buff + i, + rc = patch_instruction((struct ppc_inst *)(buff + i), ppc_inst(*(optprobe_template_entry + i))); if (rc < 0) goto error; @@ -254,20 +262,22 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) } rc = create_branch(&branch_op_callback, - (unsigned int *)buff + TMPL_CALL_HDLR_IDX, + (struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX), (unsigned long)op_callback_addr, BRANCH_SET_LINK); rc |= create_branch(&branch_emulate_step, - 
(unsigned int *)buff + TMPL_EMULATE_IDX, + (struct ppc_inst *)(buff + TMPL_EMULATE_IDX), (unsigned long)emulate_step_addr, BRANCH_SET_LINK); if (rc) goto error; - patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback); - patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step); + patch_instruction((struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX), + branch_op_callback); + patch_instruction((struct ppc_inst *)(buff + TMPL_EMULATE_IDX), + branch_emulate_step); /* * 3. load instruction to be emulated into relevant register, and @@ -277,7 +287,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) /* * 4. branch back from trampoline */ - patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0); + patch_branch((struct ppc_inst *)(buff + TMPL_RET_IDX), (unsigned long)nip, 0); flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX])); @@ -309,7 +319,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op) void arch_optimize_kprobes(struct list_head *oplist) { - unsigned int instr; + struct ppc_inst instr; struct optimized_kprobe *op; struct optimized_kprobe *tmp; @@ -321,9 +331,9 @@ void arch_optimize_kprobes(struct list_head *oplist) memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE); create_branch(&instr, - (unsigned int *)op->kp.addr, + (struct ppc_inst *)op->kp.addr, (unsigned long)op->optinsn.insn, 0); - patch_instruction(op->kp.addr, instr); + patch_instruction((struct ppc_inst *)op->kp.addr, instr); list_del_init(&op->list); } } diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 0536e4aed330..15f0a7c84944 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -74,8 +74,8 @@ EXPORT_SYMBOL(DMA_MODE_WRITE); */ notrace void __init machine_init(u64 dt_ptr) { - unsigned int *addr = (unsigned int *)patch_site_addr(&patch__memset_nocache); - unsigned int insn; + struct ppc_inst *addr = (struct ppc_inst *)patch_site_addr(&patch__memset_nocache); + struct ppc_inst insn; /* Configure static keys first, now that we're relocated. */ setup_feature_keys(); diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index cbb19af4a72a..3117ed675735 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -41,23 +41,23 @@ #define NUM_FTRACE_TRAMPS 8 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS]; -static unsigned int +static struct ppc_inst ftrace_call_replace(unsigned long ip, unsigned long addr, int link) { - unsigned int op; + struct ppc_inst op; addr = ppc_function_entry((void *)addr); /* if (link) set op to 'bl' else 'b' */ - create_branch(&op, (unsigned int *)ip, addr, link ? 1 : 0); + create_branch(&op, (struct ppc_inst *)ip, addr, link ? 
1 : 0); return op; } static int -ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) +ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new) { - unsigned int replaced; + struct ppc_inst replaced; /* * Note: @@ -79,7 +79,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) } /* replace the text with the new text */ - if (patch_instruction((unsigned int *)ip, new)) + if (patch_instruction((struct ppc_inst *)ip, new)) return -EPERM; return 0; @@ -90,24 +90,24 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) */ static int test_24bit_addr(unsigned long ip, unsigned long addr) { - unsigned int op; + struct ppc_inst op; addr = ppc_function_entry((void *)addr); /* use the create_branch to verify that this offset can be branched */ - return create_branch(&op, (unsigned int *)ip, addr, 0) == 0; + return create_branch(&op, (struct ppc_inst *)ip, addr, 0) == 0; } -static int is_bl_op(unsigned int op) +static int is_bl_op(struct ppc_inst op) { return (ppc_inst_val(op) & 0xfc000003) == 0x48000001; } -static int is_b_op(unsigned int op) +static int is_b_op(struct ppc_inst op) { return (ppc_inst_val(op) & 0xfc000003) == 0x48000000; } -static unsigned long find_bl_target(unsigned long ip, unsigned int op) +static unsigned long find_bl_target(unsigned long ip, struct ppc_inst op) { int offset; @@ -127,7 +127,7 @@ __ftrace_make_nop(struct module *mod, { unsigned long entry, ptr, tramp; unsigned long ip = rec->ip; - unsigned int op, pop; + struct ppc_inst op, pop; /* read where this goes */ if (probe_kernel_read(&op, (void *)ip, sizeof(int))) { @@ -208,7 +208,7 @@ __ftrace_make_nop(struct module *mod, } #endif /* CONFIG_MPROFILE_KERNEL */ - if (patch_instruction((unsigned int *)ip, pop)) { + if (patch_instruction((struct ppc_inst *)ip, pop)) { pr_err("Patching NOP failed.\n"); return -EPERM; } @@ -221,7 +221,7 @@ static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { - unsigned int op; + struct ppc_inst op; unsigned int jmp[4]; unsigned long ip = rec->ip; unsigned long tramp; @@ -280,7 +280,7 @@ __ftrace_make_nop(struct module *mod, op = ppc_inst(PPC_INST_NOP); - if (patch_instruction((unsigned int *)ip, op)) + if (patch_instruction((struct ppc_inst *)ip, op)) return -EPERM; return 0; @@ -291,7 +291,7 @@ __ftrace_make_nop(struct module *mod, static unsigned long find_ftrace_tramp(unsigned long ip) { int i; - unsigned int instr; + struct ppc_inst instr; /* * We have the compiler generated long_branch tramps at the end @@ -328,9 +328,10 @@ static int add_ftrace_tramp(unsigned long tramp) */ static int setup_mcount_compiler_tramp(unsigned long tramp) { - int i, op; + int i; + struct ppc_inst op; unsigned long ptr; - unsigned int instr; + struct ppc_inst instr; static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS]; /* Is this a known long jump tramp? 
*/ @@ -379,7 +380,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp) return -1; } - if (patch_branch((unsigned int *)tramp, ptr, 0)) { + if (patch_branch((struct ppc_inst *)tramp, ptr, 0)) { pr_debug("REL24 out of range!\n"); return -1; } @@ -395,7 +396,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp) static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) { unsigned long tramp, ip = rec->ip; - unsigned int op; + struct ppc_inst op; /* Read where this goes */ if (probe_kernel_read(&op, (void *)ip, sizeof(int))) { @@ -423,7 +424,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) } } - if (patch_instruction((unsigned int *)ip, ppc_inst(PPC_INST_NOP))) { + if (patch_instruction((struct ppc_inst *)ip, ppc_inst(PPC_INST_NOP))) { pr_err("Patching NOP failed.\n"); return -EPERM; } @@ -435,7 +436,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { unsigned long ip = rec->ip; - unsigned int old, new; + struct ppc_inst old, new; /* * If the calling address is more that 24 bits away, @@ -488,7 +489,7 @@ int ftrace_make_nop(struct module *mod, */ #ifndef CONFIG_MPROFILE_KERNEL static int -expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) +expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1) { /* * We expect to see: @@ -506,7 +507,7 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) } #else static int -expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) +expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1) { /* look for patched "NOP" on ppc64 with -mprofile-kernel */ if (!ppc_inst_equal(op0, ppc_inst(PPC_INST_NOP))) @@ -518,8 +519,8 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { - unsigned int op[2]; - unsigned int instr; + struct ppc_inst op[2]; + struct ppc_inst instr; void *ip = (void *)rec->ip; unsigned long entry, ptr, tramp; struct module *mod = rec->arch.mod; @@ -584,7 +585,7 @@ static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { int err; - unsigned int op; + struct ppc_inst op; unsigned long ip = rec->ip; /* read where this goes */ @@ -604,7 +605,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) } /* create the branch to the trampoline */ - err = create_branch(&op, (unsigned int *)ip, + err = create_branch(&op, (struct ppc_inst *)ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK); if (err) { pr_err("REL24 out of range!\n"); @@ -613,7 +614,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) pr_devel("write to %lx\n", rec->ip); - if (patch_instruction((unsigned int *)ip, op)) + if (patch_instruction((struct ppc_inst *)ip, op)) return -EPERM; return 0; @@ -623,7 +624,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) { - unsigned int op; + struct ppc_inst op; void *ip = (void *)rec->ip; unsigned long tramp, entry, ptr; @@ -671,7 +672,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long ip = rec->ip; - unsigned int old, new; + struct ppc_inst old, new; /* * If the calling address is more that 24 bits away, @@ -710,7 +711,7 @@ static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long 
addr) { - unsigned int op; + struct ppc_inst op; unsigned long ip = rec->ip; unsigned long entry, ptr, tramp; struct module *mod = rec->arch.mod; @@ -758,7 +759,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, /* The new target may be within range */ if (test_24bit_addr(ip, addr)) { /* within range */ - if (patch_branch((unsigned int *)ip, addr, BRANCH_SET_LINK)) { + if (patch_branch((struct ppc_inst *)ip, addr, BRANCH_SET_LINK)) { pr_err("REL24 out of range!\n"); return -EINVAL; } @@ -786,12 +787,12 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, } /* Ensure branch is within 24 bits */ - if (create_branch(&op, (unsigned int *)ip, tramp, BRANCH_SET_LINK)) { + if (create_branch(&op, (struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) { pr_err("Branch out of range\n"); return -EINVAL; } - if (patch_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) { + if (patch_branch((struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) { pr_err("REL24 out of range!\n"); return -EINVAL; } @@ -804,7 +805,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr) { unsigned long ip = rec->ip; - unsigned int old, new; + struct ppc_inst old, new; /* * If the calling address is more that 24 bits away, @@ -844,10 +845,10 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); - unsigned int old, new; + struct ppc_inst old, new; int ret; - old = *(unsigned int *)&ftrace_call; + old = *(struct ppc_inst *)&ftrace_call; new = ftrace_call_replace(ip, (unsigned long)func, 1); ret = ftrace_modify_code(ip, old, new); @@ -855,7 +856,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func) /* Also update the regs callback function */ if (!ret) { ip = (unsigned long)(&ftrace_regs_call); - old = *(unsigned int *)&ftrace_regs_call; + old = *(struct ppc_inst *)&ftrace_regs_call; new = ftrace_call_replace(ip, (unsigned long)func, 1); ret = ftrace_modify_code(ip, old, new); } @@ -929,7 +930,7 @@ int ftrace_enable_ftrace_graph_caller(void) unsigned long ip = (unsigned long)(&ftrace_graph_call); unsigned long addr = (unsigned long)(&ftrace_graph_caller); unsigned long stub = (unsigned long)(&ftrace_graph_stub); - unsigned int old, new; + struct ppc_inst old, new; old = ftrace_call_replace(ip, stub, 0); new = ftrace_call_replace(ip, addr, 0); @@ -942,7 +943,7 @@ int ftrace_disable_ftrace_graph_caller(void) unsigned long ip = (unsigned long)(&ftrace_graph_call); unsigned long addr = (unsigned long)(&ftrace_graph_caller); unsigned long stub = (unsigned long)(&ftrace_graph_stub); - unsigned int old, new; + struct ppc_inst old, new; old = ftrace_call_replace(ip, addr, 0); new = ftrace_call_replace(ip, stub, 0); diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c index a544590b90e5..3dd70eeb10c5 100644 --- a/arch/powerpc/kernel/vecemu.c +++ b/arch/powerpc/kernel/vecemu.c @@ -261,11 +261,12 @@ static unsigned int rfin(unsigned int x) int emulate_altivec(struct pt_regs *regs) { - unsigned int instr, i, word; + struct ppc_inst instr; + unsigned int i, word; unsigned int va, vb, vc, vd; vector128 *vrs; - if (get_user(instr, (unsigned int __user *) regs->nip)) + if (get_user(instr.val, (unsigned int __user *)regs->nip)) return -EFAULT; word = ppc_inst_val(instr); diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index d298bb16936e..1dff9d9d6645 100644 --- a/arch/powerpc/lib/code-patching.c +++ 
b/arch/powerpc/lib/code-patching.c @@ -19,12 +19,12 @@ #include #include -static int __patch_instruction(unsigned int *exec_addr, unsigned int instr, - unsigned int *patch_addr) +static int __patch_instruction(struct ppc_inst *exec_addr, struct ppc_inst instr, + struct ppc_inst *patch_addr) { int err = 0; - __put_user_asm(instr, patch_addr, err, "stw"); + __put_user_asm(ppc_inst_val(instr), patch_addr, err, "stw"); if (err) return err; @@ -34,7 +34,7 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr, return 0; } -int raw_patch_instruction(unsigned int *addr, unsigned int instr) +int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr) { return __patch_instruction(addr, instr, addr); } @@ -137,10 +137,10 @@ static inline int unmap_patch_area(unsigned long addr) return 0; } -static int do_patch_instruction(unsigned int *addr, unsigned int instr) +static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr) { int err; - unsigned int *patch_addr = NULL; + struct ppc_inst *patch_addr = NULL; unsigned long flags; unsigned long text_poke_addr; unsigned long kaddr = (unsigned long)addr; @@ -161,8 +161,7 @@ static int do_patch_instruction(unsigned int *addr, unsigned int instr) goto out; } - patch_addr = (unsigned int *)(text_poke_addr) + - ((kaddr & ~PAGE_MASK) / sizeof(unsigned int)); + patch_addr = (struct ppc_inst *)(text_poke_addr + (kaddr & ~PAGE_MASK)); __patch_instruction(addr, instr, patch_addr); @@ -177,14 +176,14 @@ out: } #else /* !CONFIG_STRICT_KERNEL_RWX */ -static int do_patch_instruction(unsigned int *addr, unsigned int instr) +static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr) { return raw_patch_instruction(addr, instr); } #endif /* CONFIG_STRICT_KERNEL_RWX */ -int patch_instruction(unsigned int *addr, unsigned int instr) +int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr) { /* Make sure we aren't patching a freed init section */ if (init_mem_is_free && init_section_contains(addr, 4)) { @@ -195,9 +194,9 @@ int patch_instruction(unsigned int *addr, unsigned int instr) } NOKPROBE_SYMBOL(patch_instruction); -int patch_branch(unsigned int *addr, unsigned long target, int flags) +int patch_branch(struct ppc_inst *addr, unsigned long target, int flags) { - unsigned int instr; + struct ppc_inst instr; create_branch(&instr, addr, target, flags); return patch_instruction(addr, instr); @@ -229,7 +228,7 @@ bool is_offset_in_branch_range(long offset) * Helper to check if a given instruction is a conditional branch * Derived from the conditional checks in analyse_instr() */ -bool is_conditional_branch(unsigned int instr) +bool is_conditional_branch(struct ppc_inst instr) { unsigned int opcode = ppc_inst_primary_opcode(instr); @@ -247,13 +246,13 @@ bool is_conditional_branch(unsigned int instr) } NOKPROBE_SYMBOL(is_conditional_branch); -int create_branch(unsigned int *instr, - const unsigned int *addr, +int create_branch(struct ppc_inst *instr, + const struct ppc_inst *addr, unsigned long target, int flags) { long offset; - *instr = 0; + *instr = ppc_inst(0); offset = target; if (! (flags & BRANCH_ABSOLUTE)) offset = offset - (unsigned long)addr; @@ -263,12 +262,12 @@ int create_branch(unsigned int *instr, return 1; /* Mask out the flags and target, so they don't step on each other. 
*/ - *instr = 0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC); + *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC)); return 0; } -int create_cond_branch(unsigned int *instr, const unsigned int *addr, +int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr, unsigned long target, int flags) { long offset; @@ -282,27 +281,27 @@ int create_cond_branch(unsigned int *instr, const unsigned int *addr, return 1; /* Mask out the flags and target, so they don't step on each other. */ - *instr = 0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC); + *instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC)); return 0; } -static unsigned int branch_opcode(unsigned int instr) +static unsigned int branch_opcode(struct ppc_inst instr) { return ppc_inst_primary_opcode(instr) & 0x3F; } -static int instr_is_branch_iform(unsigned int instr) +static int instr_is_branch_iform(struct ppc_inst instr) { return branch_opcode(instr) == 18; } -static int instr_is_branch_bform(unsigned int instr) +static int instr_is_branch_bform(struct ppc_inst instr) { return branch_opcode(instr) == 16; } -int instr_is_relative_branch(unsigned int instr) +int instr_is_relative_branch(struct ppc_inst instr) { if (ppc_inst_val(instr) & BRANCH_ABSOLUTE) return 0; @@ -310,12 +309,12 @@ int instr_is_relative_branch(unsigned int instr) return instr_is_branch_iform(instr) || instr_is_branch_bform(instr); } -int instr_is_relative_link_branch(unsigned int instr) +int instr_is_relative_link_branch(struct ppc_inst instr) { return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK); } -static unsigned long branch_iform_target(const unsigned int *instr) +static unsigned long branch_iform_target(const struct ppc_inst *instr) { signed long imm; @@ -331,7 +330,7 @@ static unsigned long branch_iform_target(const unsigned int *instr) return (unsigned long)imm; } -static unsigned long branch_bform_target(const unsigned int *instr) +static unsigned long branch_bform_target(const struct ppc_inst *instr) { signed long imm; @@ -347,7 +346,7 @@ static unsigned long branch_bform_target(const unsigned int *instr) return (unsigned long)imm; } -unsigned long branch_target(const unsigned int *instr) +unsigned long branch_target(const struct ppc_inst *instr) { if (instr_is_branch_iform(*instr)) return branch_iform_target(instr); @@ -357,7 +356,7 @@ unsigned long branch_target(const unsigned int *instr) return 0; } -int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr) +int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr) { if (instr_is_branch_iform(*instr) || instr_is_branch_bform(*instr)) return branch_target(instr) == addr; @@ -365,8 +364,8 @@ int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr) return 0; } -int translate_branch(unsigned int *instr, const unsigned int *dest, - const unsigned int *src) +int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest, + const struct ppc_inst *src) { unsigned long target; @@ -392,7 +391,7 @@ void __patch_exception(int exc, unsigned long addr) * instruction of the exception, not the first one */ - patch_branch(ibase + (exc / 4) + 1, addr, 0); + patch_branch((struct ppc_inst *)(ibase + (exc / 4) + 1), addr, 0); } #endif @@ -409,7 +408,7 @@ static void __init test_trampoline(void) static void __init test_branch_iform(void) { int err; - unsigned int instr; + struct ppc_inst instr; unsigned long addr; addr = (unsigned long)&instr; @@ -484,12 +483,12 @@ static void 
__init test_branch_iform(void) static void __init test_create_function_call(void) { - unsigned int *iptr; + struct ppc_inst *iptr; unsigned long dest; - unsigned int instr; + struct ppc_inst instr; /* Check we can create a function call */ - iptr = (unsigned int *)ppc_function_entry(test_trampoline); + iptr = (struct ppc_inst *)ppc_function_entry(test_trampoline); dest = ppc_function_entry(test_create_function_call); create_branch(&instr, iptr, dest, BRANCH_SET_LINK); patch_instruction(iptr, instr); @@ -500,7 +499,8 @@ static void __init test_branch_bform(void) { int err; unsigned long addr; - unsigned int *iptr, instr, flags; + struct ppc_inst *iptr, instr; + unsigned int flags; iptr = &instr; addr = (unsigned long)iptr; @@ -570,8 +570,8 @@ static void __init test_branch_bform(void) static void __init test_translate_branch(void) { unsigned long addr; - unsigned int *p, *q; - unsigned int instr; + struct ppc_inst *p, *q; + struct ppc_inst instr; void *buf; buf = vmalloc(PAGE_ALIGN(0x2000000 + 1)); diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 6e7479b8887a..fd978b8ee6d6 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -32,26 +32,26 @@ struct fixup_entry { long alt_end_off; }; -static unsigned int *calc_addr(struct fixup_entry *fcur, long offset) +static struct ppc_inst *calc_addr(struct fixup_entry *fcur, long offset) { /* * We store the offset to the code as a negative offset from * the start of the alt_entry, to support the VDSO. This * routine converts that back into an actual address. */ - return (unsigned int *)((unsigned long)fcur + offset); + return (struct ppc_inst *)((unsigned long)fcur + offset); } -static int patch_alt_instruction(unsigned int *src, unsigned int *dest, - unsigned int *alt_start, unsigned int *alt_end) +static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest, + struct ppc_inst *alt_start, struct ppc_inst *alt_end) { int err; - unsigned int instr; + struct ppc_inst instr; instr = *src; if (instr_is_relative_branch(*src)) { - unsigned int *target = (unsigned int *)branch_target(src); + struct ppc_inst *target = (struct ppc_inst *)branch_target(src); /* Branch within the section doesn't need translating */ if (target < alt_start || target > alt_end) { @@ -68,7 +68,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest, static int patch_feature_section(unsigned long value, struct fixup_entry *fcur) { - unsigned int *start, *end, *alt_start, *alt_end, *src, *dest; + struct ppc_inst *start, *end, *alt_start, *alt_end, *src, *dest; start = calc_addr(fcur, fcur->start_off); end = calc_addr(fcur, fcur->end_off); @@ -147,15 +147,17 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types) pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, ppc_inst(instrs[0])); + patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0])); if (types & STF_BARRIER_FALLBACK) - patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback, + patch_branch((struct ppc_inst *)(dest + 1), + (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK); else - patch_instruction(dest + 1, ppc_inst(instrs[1])); + patch_instruction((struct ppc_inst *)(dest + 1), + ppc_inst(instrs[1])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); + patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2])); } printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i, @@ -208,12 +210,12 @@ static void 
do_stf_exit_barrier_fixups(enum stf_barrier_type types) pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, ppc_inst(instrs[0])); - patch_instruction(dest + 1, ppc_inst(instrs[1])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); - patch_instruction(dest + 3, ppc_inst(instrs[3])); - patch_instruction(dest + 4, ppc_inst(instrs[4])); - patch_instruction(dest + 5, ppc_inst(instrs[5])); + patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0])); + patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1])); + patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2])); + patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3])); + patch_instruction((struct ppc_inst *)(dest + 4), ppc_inst(instrs[4])); + patch_instruction((struct ppc_inst *)(dest + 5), ppc_inst(instrs[5])); } printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i, (types == STF_BARRIER_NONE) ? "no" : @@ -261,9 +263,9 @@ void do_rfi_flush_fixups(enum l1d_flush_type types) pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, ppc_inst(instrs[0])); - patch_instruction(dest + 1, ppc_inst(instrs[1])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); + patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0])); + patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1])); + patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2])); } printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i, @@ -296,7 +298,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ dest = (void *)start + *start; pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, ppc_inst(instr)); + patch_instruction((struct ppc_inst *)dest, ppc_inst(instr)); } printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); @@ -339,8 +341,8 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ dest = (void *)start + *start; pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, ppc_inst(instr[0])); - patch_instruction(dest + 1, ppc_inst(instr[1])); + patch_instruction((struct ppc_inst *)dest, ppc_inst(instr[0])); + patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instr[1])); } printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); @@ -354,7 +356,7 @@ static void patch_btb_flush_section(long *curr) end = (void *)curr + *(curr + 1); for (; start < end; start++) { pr_devel("patching dest %lx\n", (unsigned long)start); - patch_instruction(start, ppc_inst(PPC_INST_NOP)); + patch_instruction((struct ppc_inst *)start, ppc_inst(PPC_INST_NOP)); } } @@ -373,7 +375,7 @@ void do_btb_flush_fixups(void) void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) { long *start, *end; - unsigned int *dest; + struct ppc_inst *dest; if (!(value & CPU_FTR_LWSYNC)) return ; @@ -390,18 +392,18 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) static void do_final_fixups(void) { #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE) - int *src, *dest; + struct ppc_inst *src, *dest; unsigned long length; if (PHYSICAL_START == 0) return; - src = (int *)(KERNELBASE + PHYSICAL_START); - dest = (int *)KERNELBASE; - length = (__end_interrupts - _stext) / sizeof(int); + src = (struct ppc_inst *)(KERNELBASE + PHYSICAL_START); + dest = (struct ppc_inst *)KERNELBASE; + length = (__end_interrupts - _stext) / sizeof(struct ppc_inst); while (length--) { - 
raw_patch_instruction(dest, ppc_inst(*src)); + raw_patch_instruction(dest, *src); src++; dest++; } diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 7f7be154da7e..95a56bb1ba3f 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1163,7 +1163,7 @@ static nokprobe_inline int trap_compare(long v1, long v2) * otherwise. */ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, - unsigned int instr) + struct ppc_inst instr) { unsigned int opcode, ra, rb, rc, rd, spr, u; unsigned long int imm; @@ -3103,7 +3103,7 @@ NOKPROBE_SYMBOL(emulate_loadstore); * or -1 if the instruction is one that should not be stepped, * such as an rfid, or a mtmsrd that would clear MSR_RI. */ -int emulate_step(struct pt_regs *regs, unsigned int instr) +int emulate_step(struct pt_regs *regs, struct ppc_inst instr) { struct instruction_op op; int r, err, type; diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index b928b21feac1..46af80279ebc 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -462,7 +462,7 @@ struct compute_test { struct { char *descr; unsigned long flags; - unsigned int instr; + struct ppc_inst instr; struct pt_regs regs; } subtests[MAX_SUBTESTS + 1]; }; @@ -843,7 +843,7 @@ static struct compute_test compute_tests[] = { }; static int __init emulate_compute_instr(struct pt_regs *regs, - unsigned int instr) + struct ppc_inst instr) { struct instruction_op op; @@ -861,7 +861,7 @@ static int __init emulate_compute_instr(struct pt_regs *regs, } static int __init execute_compute_instr(struct pt_regs *regs, - unsigned int instr) + struct ppc_inst instr) { extern int exec_instr(struct pt_regs *regs); extern s32 patch__exec_instr; @@ -892,7 +892,8 @@ static void __init run_tests_compute(void) unsigned long flags; struct compute_test *test; struct pt_regs *regs, exp, got; - unsigned int i, j, k, instr; + unsigned int i, j, k; + struct ppc_inst instr; bool ignore_gpr, ignore_xer, ignore_ccr, passed; for (i = 0; i < ARRAY_SIZE(compute_tests); i++) { diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 2c23c3076b1e..4f0ef68a7d31 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -47,7 +47,7 @@ * Check whether the instruction inst is a store using * an update addressing form which will update r1. 
*/ -static bool store_updates_sp(unsigned int inst) +static bool store_updates_sp(struct ppc_inst inst) { /* check for 1 in the rA field */ if (((ppc_inst_val(inst) >> 16) & 0x1f) != 1) @@ -305,7 +305,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) && access_ok(nip, sizeof(*nip))) { - unsigned int inst; + struct ppc_inst inst; if (!probe_user_read(&inst, nip, sizeof(inst))) return !store_updates_sp(inst); diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 3dcfecf858f3..13b9dd5e4a76 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -421,14 +421,14 @@ static __u64 power_pmu_bhrb_to(u64 addr) if (probe_kernel_read(&instr, (void *)addr, sizeof(instr))) return 0; - return branch_target(&instr); + return branch_target((struct ppc_inst *)&instr); } /* Userspace: need copy instruction here then translate it */ if (probe_user_read(&instr, (unsigned int __user *)addr, sizeof(instr))) return 0; - target = branch_target(&instr); + target = branch_target((struct ppc_inst *)&instr); if ((!target) || (instr & BRANCH_ABSOLUTE)) return target; diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c index 31540ebf1e29..dba3aa73c062 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c @@ -73,7 +73,7 @@ smp_86xx_kick_cpu(int nr) /* Setup fake reset vector to call __secondary_start_mpc86xx. */ target = (unsigned long) __secondary_start_mpc86xx; - patch_branch(vector, target, BRANCH_SET_LINK); + patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK); /* Kick that CPU */ smp_86xx_release_core(nr); @@ -83,7 +83,7 @@ smp_86xx_kick_cpu(int nr) mdelay(1); /* Restore the exception vector */ - patch_instruction(vector, ppc_inst(save_vector)); + patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector)); local_irq_restore(flags); diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 44a00990af9d..9969c07035b6 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -814,7 +814,7 @@ static int smp_core99_kick_cpu(int nr) * b __secondary_start_pmac_0 + nr*8 */ target = (unsigned long) __secondary_start_pmac_0 + nr * 8; - patch_branch(vector, target, BRANCH_SET_LINK); + patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK); /* Put some life in our friend */ pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0); @@ -827,7 +827,7 @@ static int smp_core99_kick_cpu(int nr) mdelay(1); /* Restore our exception vector */ - patch_instruction(vector, ppc_inst(save_vector)); + patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector)); local_irq_restore(flags); if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 4cf998518047..2e3b15813cf1 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -100,7 +100,7 @@ static long *xmon_fault_jmp[NR_CPUS]; /* Breakpoint stuff */ struct bpt { unsigned long address; - unsigned int *instr; + struct ppc_inst *instr; atomic_t ref_count; int enabled; unsigned long pad; @@ -876,8 +876,8 @@ static struct bpt *new_breakpoint(unsigned long a) for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { if (!bp->enabled && atomic_read(&bp->ref_count) == 0) { bp->address = a; - bp->instr = bpt_table + ((bp - bpts) * BPT_WORDS); - 
patch_instruction(bp->instr + 1, bpinstr); + bp->instr = (void *)(bpt_table + ((bp - bpts) * BPT_WORDS)); + patch_instruction(bp->instr + 1, ppc_inst(bpinstr)); return bp; } } @@ -889,7 +889,7 @@ static struct bpt *new_breakpoint(unsigned long a) static void insert_bpts(void) { int i; - unsigned int instr; + struct ppc_inst instr; struct bpt *bp; bp = bpts; @@ -911,8 +911,8 @@ static void insert_bpts(void) patch_instruction(bp->instr, instr); if (bp->enabled & BP_CIABR) continue; - if (patch_instruction((unsigned int *)bp->address, - bpinstr) != 0) { + if (patch_instruction((struct ppc_inst *)bp->address, + ppc_inst(bpinstr)) != 0) { printf("Couldn't write instruction at %lx, " "disabling breakpoint there\n", bp->address); bp->enabled &= ~BP_TRAP; @@ -940,7 +940,7 @@ static void remove_bpts(void) { int i; struct bpt *bp; - unsigned instr; + struct ppc_inst instr; bp = bpts; for (i = 0; i < NBPTS; ++i, ++bp) { @@ -949,7 +949,7 @@ static void remove_bpts(void) if (mread(bp->address, &instr, 4) == 4 && ppc_inst_equal(instr, ppc_inst(bpinstr)) && patch_instruction( - (unsigned int *)bp->address, bp->instr[0]) != 0) + (struct ppc_inst *)bp->address, bp->instr[0]) != 0) printf("Couldn't remove breakpoint at %lx\n", bp->address); } @@ -1156,7 +1156,7 @@ static int do_step(struct pt_regs *regs) */ static int do_step(struct pt_regs *regs) { - unsigned int instr; + struct ppc_inst instr; int stepped; force_enable_xmon(); @@ -1322,7 +1322,7 @@ csum(void) */ static long check_bp_loc(unsigned long addr) { - unsigned int instr; + struct ppc_inst instr; addr &= ~3; if (!is_kernel_addr(addr)) { @@ -2848,7 +2848,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, { int nr, dotted; unsigned long first_adr; - unsigned int inst, last_inst = ppc_inst(0); + struct ppc_inst inst, last_inst = ppc_inst(0); unsigned char val[4]; dotted = 0; diff --git a/arch/powerpc/xmon/xmon_bpts.h b/arch/powerpc/xmon/xmon_bpts.h index b7e94375db86..57e6fb03de48 100644 --- a/arch/powerpc/xmon/xmon_bpts.h +++ b/arch/powerpc/xmon/xmon_bpts.h @@ -4,11 +4,11 @@ #define NBPTS 256 #ifndef __ASSEMBLY__ -#define BPT_SIZE (sizeof(unsigned int) * 2) -#define BPT_WORDS (BPT_SIZE / sizeof(unsigned int)) +#include +#define BPT_SIZE (sizeof(struct ppc_inst) * 2) +#define BPT_WORDS (BPT_SIZE / sizeof(struct ppc_inst)) extern unsigned int bpt_table[NBPTS * BPT_WORDS]; - #endif /* __ASSEMBLY__ */ #endif /* XMON_BPTS_H */ -- cgit v1.2.3-59-g8ed1b From f8faaffaa7d99028e457ef2d1dcb43a98f736938 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:32 +1000 Subject: powerpc: Use a function for reading instructions Prefixed instructions will mean there are instructions of different length. As a result dereferencing a pointer to an instruction will not necessarily give the desired result. Introduce a function for reading instructions from memory into the instruction data type. 
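The motivation is easiest to see in a standalone model: once instructions can be either four or eight bytes long, a plain dereference through a fixed-width pointer silently drops the suffix word of the longer form, whereas a single read accessor can inspect the first word and fetch the rest only when needed. The sketch below is ordinary user-space C, not the kernel code; the two-word struct, the opcode-1 prefix test and the names inst_is_prefixed()/ppc_inst_read_model() are illustrative assumptions (the kernel's actual prefixed-instruction support arrives in later patches in this series).

#include <stdint.h>
#include <stdio.h>

/* Model of the instruction type; a prefixed-capable version carries
 * a second word, which a plain *ptr dereference would never see. */
struct ppc_inst_model {
        uint32_t val;
        uint32_t suffix;
};

/* Assumed check: ISA 3.1 prefixed instructions use primary opcode 1. */
static int inst_is_prefixed(uint32_t word)
{
        return (word >> 26) == 1;
}

/* One central accessor: callers never dereference raw pointers, so
 * only this function has to know how long an instruction is. */
static struct ppc_inst_model ppc_inst_read_model(const uint32_t *ptr)
{
        struct ppc_inst_model inst = { .val = ptr[0], .suffix = 0 };

        if (inst_is_prefixed(ptr[0]))
                inst.suffix = ptr[1];
        return inst;
}

int main(void)
{
        /* A made-up prefix word followed by a made-up suffix word. */
        uint32_t stream[2] = { 0x06000000, 0x38630001 };
        struct ppc_inst_model inst = ppc_inst_read_model(stream);

        printf("val=%08x suffix=%08x\n",
               (unsigned)inst.val, (unsigned)inst.suffix);
        return 0;
}

Converting every *ptr to ppc_inst_read(ptr) now, while both spellings behave identically, is what lets the later prefixed-instruction patches change only the accessor instead of every call site.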
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Reviewed-by: Alistair Popple Link: https://lore.kernel.org/r/20200506034050.24806-13-jniethe5@gmail.com --- arch/powerpc/include/asm/inst.h | 5 +++++ arch/powerpc/kernel/kprobes.c | 6 +++--- arch/powerpc/kernel/mce_power.c | 2 +- arch/powerpc/kernel/optprobes.c | 4 ++-- arch/powerpc/kernel/trace/ftrace.c | 4 ++-- arch/powerpc/kernel/uprobes.c | 2 +- arch/powerpc/lib/code-patching.c | 26 ++++++++++++++------------ arch/powerpc/lib/feature-fixups.c | 4 ++-- arch/powerpc/xmon/xmon.c | 6 +++--- 9 files changed, 33 insertions(+), 26 deletions(-) diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index f602ca908936..f9cbb24d2e34 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -27,6 +27,11 @@ static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x) return ppc_inst(swab32(ppc_inst_val(x))); } +static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr) +{ + return *ptr; +} + static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y) { return ppc_inst_val(x) == ppc_inst_val(y); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index a08ae5803622..f64312dca84f 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -106,7 +106,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset) int arch_prepare_kprobe(struct kprobe *p) { int ret = 0; - struct ppc_inst insn = *(struct ppc_inst *)p->addr; + struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr); if ((unsigned long)p->addr & 0x03) { printk("Attempt to register kprobe at an unaligned address\n"); @@ -127,7 +127,7 @@ int arch_prepare_kprobe(struct kprobe *p) if (!ret) { memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); - p->opcode = *p->addr; + p->opcode = ppc_inst_val(insn); flush_icache_range((unsigned long)p->ainsn.insn, (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t)); } @@ -217,7 +217,7 @@ NOKPROBE_SYMBOL(arch_prepare_kretprobe); static int try_to_emulate(struct kprobe *p, struct pt_regs *regs) { int ret; - struct ppc_inst insn = *(struct ppc_inst *)p->ainsn.insn; + struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->ainsn.insn); /* regs->nip is also adjusted if emulate_step returns 1 */ ret = emulate_step(regs, insn); diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index 08b355f80d9e..c32af49a5138 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -378,7 +378,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr, pfn = addr_to_pfn(regs, regs->nip); if (pfn != ULONG_MAX) { instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK); - instr = *(struct ppc_inst *)(instr_addr); + instr = ppc_inst_read((struct ppc_inst *)instr_addr); if (!analyse_instr(&op, &tmp, instr)) { pfn = addr_to_pfn(regs, op.ea); *addr = op.ea; diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 5a71fef71c22..52c1ab3f85aa 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -100,9 +100,9 @@ static unsigned long can_optimize(struct kprobe *p) * Ensure that the instruction is not a conditional branch, * and that can be emulated. 
*/ - if (!is_conditional_branch(*(struct ppc_inst *)p->ainsn.insn) && + if (!is_conditional_branch(ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) && analyse_instr(&op, ®s, - *(struct ppc_inst *)p->ainsn.insn) == 1) { + ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) == 1) { emulate_update_regs(®s, &op); nip = regs.nip; } diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 3117ed675735..acd5b889815f 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -848,7 +848,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func) struct ppc_inst old, new; int ret; - old = *(struct ppc_inst *)&ftrace_call; + old = ppc_inst_read((struct ppc_inst *)&ftrace_call); new = ftrace_call_replace(ip, (unsigned long)func, 1); ret = ftrace_modify_code(ip, old, new); @@ -856,7 +856,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func) /* Also update the regs callback function */ if (!ret) { ip = (unsigned long)(&ftrace_regs_call); - old = *(struct ppc_inst *)&ftrace_regs_call; + old = ppc_inst_read((struct ppc_inst *)&ftrace_regs_call); new = ftrace_call_replace(ip, (unsigned long)func, 1); ret = ftrace_modify_code(ip, old, new); } diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index 31c870287f2b..6893d40a48c5 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -174,7 +174,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) * emulate_step() returns 1 if the insn was successfully emulated. * For all other cases, we need to single-step in hardware. */ - ret = emulate_step(regs, auprobe->insn); + ret = emulate_step(regs, ppc_inst_read(&auprobe->insn)); if (ret > 0) return true; diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 1dff9d9d6645..435fc8e9f45d 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -348,9 +348,9 @@ static unsigned long branch_bform_target(const struct ppc_inst *instr) unsigned long branch_target(const struct ppc_inst *instr) { - if (instr_is_branch_iform(*instr)) + if (instr_is_branch_iform(ppc_inst_read(instr))) return branch_iform_target(instr); - else if (instr_is_branch_bform(*instr)) + else if (instr_is_branch_bform(ppc_inst_read(instr))) return branch_bform_target(instr); return 0; @@ -358,7 +358,8 @@ unsigned long branch_target(const struct ppc_inst *instr) int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr) { - if (instr_is_branch_iform(*instr) || instr_is_branch_bform(*instr)) + if (instr_is_branch_iform(ppc_inst_read(instr)) || + instr_is_branch_bform(ppc_inst_read(instr))) return branch_target(instr) == addr; return 0; @@ -368,13 +369,14 @@ int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest, const struct ppc_inst *src) { unsigned long target; - target = branch_target(src); - if (instr_is_branch_iform(*src)) - return create_branch(instr, dest, target, ppc_inst_val(*src)); - else if (instr_is_branch_bform(*src)) - return create_cond_branch(instr, dest, target, ppc_inst_val(*src)); + if (instr_is_branch_iform(ppc_inst_read(src))) + return create_branch(instr, dest, target, + ppc_inst_val(ppc_inst_read(src))); + else if (instr_is_branch_bform(ppc_inst_read(src))) + return create_cond_branch(instr, dest, target, + ppc_inst_val(ppc_inst_read(src))); return 1; } @@ -598,7 +600,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); 
check(instr_is_branch_to_addr(q, addr)); - check(ppc_inst_equal(*q, ppc_inst(0x4a000000))); + check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x4a000000))); /* Maximum positive case, move x to x - 32 MB + 4 */ p = buf + 0x2000000; @@ -609,7 +611,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(ppc_inst_equal(*q, ppc_inst(0x49fffffc))); + check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x49fffffc))); /* Jump to x + 16 MB moved to x + 20 MB */ p = buf; @@ -655,7 +657,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(ppc_inst_equal(*q, ppc_inst(0x43ff8000))); + check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff8000))); /* Maximum positive case, move x to x - 32 KB + 4 */ p = buf + 0x8000; @@ -667,7 +669,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(ppc_inst_equal(*q, ppc_inst(0x43ff7ffc))); + check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff7ffc))); /* Jump to x + 12 KB moved to x + 20 KB */ p = buf; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index fd978b8ee6d6..3c55097d406d 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -48,7 +48,7 @@ static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest, int err; struct ppc_inst instr; - instr = *src; + instr = ppc_inst_read(src); if (instr_is_relative_branch(*src)) { struct ppc_inst *target = (struct ppc_inst *)branch_target(src); @@ -403,7 +403,7 @@ static void do_final_fixups(void) length = (__end_interrupts - _stext) / sizeof(struct ppc_inst); while (length--) { - raw_patch_instruction(dest, *src); + raw_patch_instruction(dest, ppc_inst_read(src)); src++; dest++; } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 2e3b15813cf1..a4f8f570dbbe 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -702,13 +702,13 @@ static int xmon_core(struct pt_regs *regs, int fromipi) if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { bp = at_breakpoint(regs->nip); if (bp != NULL) { - int stepped = emulate_step(regs, bp->instr[0]); + int stepped = emulate_step(regs, ppc_inst_read(bp->instr)); if (stepped == 0) { regs->nip = (unsigned long) &bp->instr[0]; atomic_inc(&bp->ref_count); } else if (stepped < 0) { printf("Couldn't single-step %s instruction\n", - (IS_RFID(bp->instr[0])? "rfid": "mtmsrd")); + IS_RFID(ppc_inst_read(bp->instr))? "rfid": "mtmsrd"); } } } @@ -949,7 +949,7 @@ static void remove_bpts(void) if (mread(bp->address, &instr, 4) == 4 && ppc_inst_equal(instr, ppc_inst(bpinstr)) && patch_instruction( - (struct ppc_inst *)bp->address, bp->instr[0]) != 0) + (struct ppc_inst *)bp->address, ppc_inst_read(bp->instr)) != 0) printf("Couldn't remove breakpoint at %lx\n", bp->address); } -- cgit v1.2.3-59-g8ed1b From 7ba68b2172c19031fdc2a2caf37328edd146e299 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:33 +1000 Subject: powerpc: Add a probe_user_read_inst() function Introduce a probe_user_read_inst() function to use in cases where probe_user_read() is used for getting an instruction. This will be more useful for prefixed instructions. 
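One property of the helper is worth spelling out: as the sign-off note below records, *inst is only written when the copy succeeds, so a caller's buffer never ends up holding a half-valid instruction after an -EFAULT. A minimal user-space model of that contract follows; copy_from_model() stands in for probe_user_read(), and both it and read_inst_model() are hypothetical names used only for this sketch.

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct ppc_inst_model { uint32_t val; };

/* Stand-in for probe_user_read(): 0 on success, -EFAULT on a bad
 * source pointer. Hypothetical, for this sketch only. */
static int copy_from_model(uint32_t *dst, const uint32_t *src)
{
        if (!src)
                return -EFAULT;
        memcpy(dst, src, sizeof(*dst));
        return 0;
}

/* Same contract as probe_user_read_inst(): read into a local first,
 * and store to *inst only once the copy is known to have succeeded. */
static int read_inst_model(struct ppc_inst_model *inst, const uint32_t *nip)
{
        uint32_t val;
        int err = copy_from_model(&val, nip);

        if (!err)
                inst->val = val;
        return err;
}

int main(void)
{
        struct ppc_inst_model inst = { 0 };
        uint32_t word = 0x60000000;     /* nop */

        return read_inst_model(&inst, &word);
}

The same local-then-store shape also keeps a later prefixed version simple: the helper can read one word, decide a suffix is needed, read again, and still never expose a partial result to its caller.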
Signed-off-by: Jordan Niethe Reviewed-by: Alistair Popple [mpe: Don't write to *inst on error, fold in __user annotations] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-14-jniethe5@gmail.com --- arch/powerpc/include/asm/inst.h | 3 +++ arch/powerpc/lib/Makefile | 2 +- arch/powerpc/lib/inst.c | 20 ++++++++++++++++++++ arch/powerpc/mm/fault.c | 4 ++-- 4 files changed, 26 insertions(+), 3 deletions(-) create mode 100644 arch/powerpc/lib/inst.c diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index f9cbb24d2e34..4db0ecee2698 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -37,4 +37,7 @@ static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y) return ppc_inst_val(x) == ppc_inst_val(y); } +int probe_user_read_inst(struct ppc_inst *inst, + struct ppc_inst __user *nip); + #endif /* _ASM_POWERPC_INST_H */ diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index b8de3be10eb4..546591848219 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -16,7 +16,7 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING endif -obj-y += alloc.o code-patching.o feature-fixups.o pmem.o +obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o ifndef CONFIG_KASAN obj-y += string.o memcmp_$(BITS).o diff --git a/arch/powerpc/lib/inst.c b/arch/powerpc/lib/inst.c new file mode 100644 index 000000000000..605220dbb6ba --- /dev/null +++ b/arch/powerpc/lib/inst.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2020, IBM Corporation. + */ + +#include +#include + +int probe_user_read_inst(struct ppc_inst *inst, + struct ppc_inst __user *nip) +{ + unsigned int val; + int err; + + err = probe_user_read(&val, nip, sizeof(val)); + if (!err) + *inst = ppc_inst(val); + + return err; +} diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 4f0ef68a7d31..2393ed9d84bb 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -282,7 +282,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, * expand to 1MB without further checks. */ if (address + 0x100000 < vma->vm_end) { - unsigned int __user *nip = (unsigned int __user *)regs->nip; + struct ppc_inst __user *nip = (struct ppc_inst __user *)regs->nip; /* get user regs even if this fault is in kernel mode */ struct pt_regs *uregs = current->thread.regs; if (uregs == NULL) @@ -307,7 +307,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, access_ok(nip, sizeof(*nip))) { struct ppc_inst inst; - if (!probe_user_read(&inst, nip, sizeof(inst))) + if (!probe_user_read_inst(&inst, nip)) return !store_updates_sp(inst); *must_retry = true; } -- cgit v1.2.3-59-g8ed1b From 95b980a00d1220ca67550a933166704db8bc5c14 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:34 +1000 Subject: powerpc: Add a probe_kernel_read_inst() function Introduce a probe_kernel_read_inst() function to use in cases where probe_kernel_read() is used for getting an instruction. This will be more useful for prefixed instructions. 
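The ftrace conversions below are all the same mechanical rewrite, and seeing it once in isolation makes the point of the wrapper clear: before, every call site hard-coded a four-byte read; after, the size decision lives in exactly one helper. This is a sketch of the pattern rather than any literal kernel hunk, with ip standing for some unsigned long holding a text address:

/* Before: sizeof(int) is baked into every caller. */
unsigned int old_op;
if (probe_kernel_read(&old_op, (void *)ip, sizeof(int)))
        return -EFAULT;

/* After: the helper decides how many bytes an instruction needs, so
 * these call sites survive the move to eight-byte prefixed forms. */
struct ppc_inst op;
if (probe_kernel_read_inst(&op, (struct ppc_inst *)ip))
        return -EFAULT;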
Signed-off-by: Jordan Niethe Reviewed-by: Alistair Popple [mpe: Don't write to *inst on error] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-15-jniethe5@gmail.com --- arch/powerpc/include/asm/inst.h | 3 +++ arch/powerpc/kernel/trace/ftrace.c | 23 +++++++++++++---------- arch/powerpc/lib/inst.c | 13 +++++++++++++ 3 files changed, 29 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index 4db0ecee2698..e7e779805090 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -40,4 +40,7 @@ static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y) int probe_user_read_inst(struct ppc_inst *inst, struct ppc_inst __user *nip); +int probe_kernel_read_inst(struct ppc_inst *inst, + struct ppc_inst *src); + #endif /* _ASM_POWERPC_INST_H */ diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index acd5b889815f..5e399628f51a 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -68,7 +68,7 @@ ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new) */ /* read the text we want to modify */ - if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE)) + if (probe_kernel_read_inst(&replaced, (void *)ip)) return -EFAULT; /* Make sure it is what we expect it to be */ @@ -130,7 +130,7 @@ __ftrace_make_nop(struct module *mod, struct ppc_inst op, pop; /* read where this goes */ - if (probe_kernel_read(&op, (void *)ip, sizeof(int))) { + if (probe_kernel_read_inst(&op, (void *)ip)) { pr_err("Fetching opcode failed.\n"); return -EFAULT; } @@ -164,7 +164,7 @@ __ftrace_make_nop(struct module *mod, /* When using -mkernel_profile there is no load to jump over */ pop = ppc_inst(PPC_INST_NOP); - if (probe_kernel_read(&op, (void *)(ip - 4), 4)) { + if (probe_kernel_read_inst(&op, (void *)(ip - 4))) { pr_err("Fetching instruction at %lx failed.\n", ip - 4); return -EFAULT; } @@ -197,7 +197,7 @@ __ftrace_make_nop(struct module *mod, * Check what is in the next instruction. We can see ld r2,40(r1), but * on first pass after boot we will see mflr r0. 
*/ - if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) { + if (probe_kernel_read_inst(&op, (void *)(ip + 4))) { pr_err("Fetching op failed.\n"); return -EFAULT; } @@ -349,7 +349,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp) return -1; /* New trampoline -- read where this goes */ - if (probe_kernel_read(&op, (void *)tramp, sizeof(int))) { + if (probe_kernel_read_inst(&op, (void *)tramp)) { pr_debug("Fetching opcode failed.\n"); return -1; } @@ -399,7 +399,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) struct ppc_inst op; /* Read where this goes */ - if (probe_kernel_read(&op, (void *)ip, sizeof(int))) { + if (probe_kernel_read_inst(&op, (void *)ip)) { pr_err("Fetching opcode failed.\n"); return -EFAULT; } @@ -526,7 +526,10 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) struct module *mod = rec->arch.mod; /* read where this goes */ - if (probe_kernel_read(op, ip, sizeof(op))) + if (probe_kernel_read_inst(op, ip)) + return -EFAULT; + + if (probe_kernel_read_inst(op + 1, ip + 4)) return -EFAULT; if (!expected_nop_sequence(ip, op[0], op[1])) { @@ -589,7 +592,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) unsigned long ip = rec->ip; /* read where this goes */ - if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) + if (probe_kernel_read_inst(&op, (void *)ip)) return -EFAULT; /* It should be pointing to a nop */ @@ -645,7 +648,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) } /* Make sure we have a nop */ - if (probe_kernel_read(&op, ip, sizeof(op))) { + if (probe_kernel_read_inst(&op, ip)) { pr_err("Unable to read ftrace location %p\n", ip); return -EFAULT; } @@ -723,7 +726,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, } /* read where this goes */ - if (probe_kernel_read(&op, (void *)ip, sizeof(int))) { + if (probe_kernel_read_inst(&op, (void *)ip)) { pr_err("Fetching opcode failed.\n"); return -EFAULT; } diff --git a/arch/powerpc/lib/inst.c b/arch/powerpc/lib/inst.c index 605220dbb6ba..bf3126ee399d 100644 --- a/arch/powerpc/lib/inst.c +++ b/arch/powerpc/lib/inst.c @@ -18,3 +18,16 @@ int probe_user_read_inst(struct ppc_inst *inst, return err; } + +int probe_kernel_read_inst(struct ppc_inst *inst, + struct ppc_inst *src) +{ + unsigned int val; + int err; + + err = probe_kernel_read(&val, src, sizeof(val)); + if (!err) + *inst = ppc_inst(val); + + return err; +} -- cgit v1.2.3-59-g8ed1b From a8646f43ba5046e7f5c4396125d5136bfcb17b49 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:35 +1000 Subject: powerpc/kprobes: Use patch_instruction() Instead of using memcpy() and flush_icache_range() use patch_instruction() which not only accomplishes both of these steps but will also make it easier to add support for prefixed instructions. 
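The substitution is mechanical; schematically (insn is the probed
instruction, already fetched with ppc_inst_read()):

    /* Before: raw copy, then explicit icache maintenance. */
    memcpy(p->ainsn.insn, p->addr,
           MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
    flush_icache_range((unsigned long)p->ainsn.insn,
                       (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));

    /* After: one call that both writes the instruction and
     * synchronizes the icache. */
    patch_instruction((struct ppc_inst *)p->ainsn.insn, insn);
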
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Reviewed-by: Alistair Popple Link: https://lore.kernel.org/r/20200506034050.24806-16-jniethe5@gmail.com --- arch/powerpc/kernel/kprobes.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index f64312dca84f..a72c8e1a42ad 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -125,11 +125,8 @@ int arch_prepare_kprobe(struct kprobe *p) } if (!ret) { - memcpy(p->ainsn.insn, p->addr, - MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + patch_instruction((struct ppc_inst *)p->ainsn.insn, insn); p->opcode = ppc_inst_val(insn); - flush_icache_range((unsigned long)p->ainsn.insn, - (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t)); } p->ainsn.boostable = 0; -- cgit v1.2.3-59-g8ed1b From 5249385ad7f0ac178433f0ae9cc5b64612c8ff77 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:36 +1000 Subject: powerpc: Define and use get_user_instr() et. al. Define specialised get_user_instr(), __get_user_instr() and __get_user_instr_inatomic() macros for reading instructions from user and/or kernel space. Signed-off-by: Jordan Niethe Reviewed-by: Alistair Popple [mpe: Squash in addition of get_user_instr() & __user annotations] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-17-jniethe5@gmail.com --- arch/powerpc/include/asm/uaccess.h | 9 +++++++++ arch/powerpc/kernel/align.c | 2 +- arch/powerpc/kernel/hw_breakpoint.c | 2 +- arch/powerpc/kernel/vecemu.c | 2 +- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 2f500debae21..0d2d0c3dc527 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -105,6 +105,15 @@ static inline int __access_ok(unsigned long addr, unsigned long size, #define __put_user_inatomic(x, ptr) \ __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#define get_user_instr(x, ptr) \ + get_user((x).val, (u32 __user *)(ptr)) + +#define __get_user_instr(x, ptr) \ + __get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true) + +#define __get_user_instr_inatomic(x, ptr) \ + __get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32)) + extern long __put_user_bad(void); /* diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 9b35d6160507..1f1ce8b86d5b 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -305,7 +305,7 @@ int fix_alignment(struct pt_regs *regs) */ CHECK_FULL_REGS(regs); - if (unlikely(__get_user(instr.val, (unsigned int __user *)regs->nip))) + if (unlikely(__get_user_instr(instr, (void __user *)regs->nip))) return -EFAULT; if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) { /* We don't handle PPC little-endian any more... 
*/ diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 2db9a7ac7bcb..423603c92c0f 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -249,7 +249,7 @@ static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp, struct instruction_op op; unsigned long addr = info->address; - if (__get_user_inatomic(instr.val, (unsigned int *)regs->nip)) + if (__get_user_instr_inatomic(instr, (void __user *)regs->nip)) goto fail; ret = analyse_instr(&op, regs, instr); diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c index 3dd70eeb10c5..ae632569446f 100644 --- a/arch/powerpc/kernel/vecemu.c +++ b/arch/powerpc/kernel/vecemu.c @@ -266,7 +266,7 @@ int emulate_altivec(struct pt_regs *regs) unsigned int va, vb, vc, vd; vector128 *vrs; - if (get_user(instr.val, (unsigned int __user *)regs->nip)) + if (get_user_instr(instr, (void __user *)regs->nip)) return -EFAULT; word = ppc_inst_val(instr); -- cgit v1.2.3-59-g8ed1b From 622cf6f436a12338bbcfbb3474629755547fd112 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:37 +1000 Subject: powerpc: Introduce a function for reporting instruction length Currently all instructions have the same length, but in preparation for prefixed instructions introduce a function for returning instruction length. Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Reviewed-by: Alistair Popple Link: https://lore.kernel.org/r/20200506034050.24806-18-jniethe5@gmail.com --- arch/powerpc/include/asm/inst.h | 5 +++++ arch/powerpc/kernel/kprobes.c | 6 ++++-- arch/powerpc/kernel/uprobes.c | 2 +- arch/powerpc/lib/feature-fixups.c | 14 +++++++------- 4 files changed, 17 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index e7e779805090..c7ea70e73073 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -17,6 +17,11 @@ static inline u32 ppc_inst_val(struct ppc_inst x) return x.val; } +static inline int ppc_inst_len(struct ppc_inst x) +{ + return sizeof(struct ppc_inst); +} + static inline int ppc_inst_primary_opcode(struct ppc_inst x) { return ppc_inst_val(x) >> 26; diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index a72c8e1a42ad..33d54b091c70 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -462,14 +462,16 @@ NOKPROBE_SYMBOL(trampoline_probe_handler); */ int kprobe_post_handler(struct pt_regs *regs) { + int len; struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); if (!cur || user_mode(regs)) return 0; + len = ppc_inst_len(ppc_inst_read((struct ppc_inst *)cur->ainsn.insn)); /* make sure we got here for instruction we have a kprobe on */ - if (((unsigned long)cur->ainsn.insn + 4) != regs->nip) + if (((unsigned long)cur->ainsn.insn + len) != regs->nip) return 0; if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { @@ -478,7 +480,7 @@ int kprobe_post_handler(struct pt_regs *regs) } /* Adjust nip to after the single-stepped instruction */ - regs->nip = (unsigned long)cur->addr + 4; + regs->nip = (unsigned long)cur->addr + len; regs->msr |= kcb->kprobe_saved_msr; /*Restore back the original saved kprobes variables and continue. 
*/ diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index 6893d40a48c5..83e883e1a42d 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -112,7 +112,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) * support doesn't exist and have to fix-up the next instruction * to be executed. */ - regs->nip = utask->vaddr + MAX_UINSN_BYTES; + regs->nip = utask->vaddr + ppc_inst_len(ppc_inst_read(&auprobe->insn)); user_disable_single_step(current); return 0; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 3c55097d406d..0c9ffdef8096 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -392,20 +392,20 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) static void do_final_fixups(void) { #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE) - struct ppc_inst *src, *dest; - unsigned long length; + struct ppc_inst inst, *src, *dest, *end; if (PHYSICAL_START == 0) return; src = (struct ppc_inst *)(KERNELBASE + PHYSICAL_START); dest = (struct ppc_inst *)KERNELBASE; - length = (__end_interrupts - _stext) / sizeof(struct ppc_inst); + end = (void *)src + (__end_interrupts - _stext); - while (length--) { - raw_patch_instruction(dest, ppc_inst_read(src)); - src++; - dest++; + while (src < end) { + inst = ppc_inst_read(src); + raw_patch_instruction(dest, inst); + src = (void *)src + ppc_inst_len(inst); + dest = (void *)dest + ppc_inst_len(inst); } #endif } -- cgit v1.2.3-59-g8ed1b From 6c7a4f0a9f66fc7fdc6e208559e5d562f53e0991 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:38 +1000 Subject: powerpc/xmon: Use a function for reading instructions Currently in xmon, mread() is used for reading instructions. In preparation for prefixed instructions, create and use a new function, mread_instr(), especially for reading instructions. 
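mread_instr() returns the instruction length on success and 0 on
failure, so callers test it as a boolean instead of comparing against a
byte count; for example (mirroring the check_bp_loc() hunk below):

    struct ppc_inst instr;

    if (!mread_instr(addr, &instr)) {
        printf("Can't read instruction at address %lx\n", addr);
        return 0;
    }
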
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Reviewed-by: Alistair Popple Link: https://lore.kernel.org/r/20200506034050.24806-19-jniethe5@gmail.com --- arch/powerpc/xmon/xmon.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index a4f8f570dbbe..d8b29f6925be 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -122,6 +122,7 @@ static unsigned bpinstr = 0x7fe00008; /* trap */ static int cmds(struct pt_regs *); static int mread(unsigned long, void *, int); static int mwrite(unsigned long, void *, int); +static int mread_instr(unsigned long, struct ppc_inst *); static int handle_fault(struct pt_regs *); static void byterev(unsigned char *, int); static void memex(void); @@ -896,7 +897,7 @@ static void insert_bpts(void) for (i = 0; i < NBPTS; ++i, ++bp) { if ((bp->enabled & (BP_TRAP|BP_CIABR)) == 0) continue; - if (mread(bp->address, &instr, 4) != 4) { + if (!mread_instr(bp->address, &instr)) { printf("Couldn't read instruction at %lx, " "disabling breakpoint there\n", bp->address); bp->enabled = 0; @@ -946,7 +947,7 @@ static void remove_bpts(void) for (i = 0; i < NBPTS; ++i, ++bp) { if ((bp->enabled & (BP_TRAP|BP_CIABR)) != BP_TRAP) continue; - if (mread(bp->address, &instr, 4) == 4 + if (mread_instr(bp->address, &instr) && ppc_inst_equal(instr, ppc_inst(bpinstr)) && patch_instruction( (struct ppc_inst *)bp->address, ppc_inst_read(bp->instr)) != 0) @@ -1162,7 +1163,7 @@ static int do_step(struct pt_regs *regs) force_enable_xmon(); /* check we are in 64-bit kernel mode, translation enabled */ if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) { - if (mread(regs->nip, &instr, 4) == 4) { + if (mread_instr(regs->nip, &instr)) { stepped = emulate_step(regs, instr); if (stepped < 0) { printf("Couldn't single-step %s instruction\n", @@ -1329,7 +1330,7 @@ static long check_bp_loc(unsigned long addr) printf("Breakpoints may only be placed at kernel addresses\n"); return 0; } - if (!mread(addr, &instr, sizeof(instr))) { + if (!mread_instr(addr, &instr)) { printf("Can't read instruction at address %lx\n", addr); return 0; } @@ -2122,6 +2123,25 @@ mwrite(unsigned long adrs, void *buf, int size) return n; } +static int +mread_instr(unsigned long adrs, struct ppc_inst *instr) +{ + volatile int n; + + n = 0; + if (setjmp(bus_error_jmp) == 0) { + catch_memory_errors = 1; + sync(); + *instr = ppc_inst_read((struct ppc_inst *)adrs); + sync(); + /* wait a little while to see if we get a machine check */ + __delay(200); + n = ppc_inst_len(*instr); + } + catch_memory_errors = 0; + return n; +} + static int fault_type; static int fault_except; static char *fault_chars[] = { "--", "**", "##" }; -- cgit v1.2.3-59-g8ed1b From 7fccfcfba04f9cb46438f368755d368f6c57f3a0 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:39 +1000 Subject: powerpc/xmon: Move insertion of breakpoint for xol'ing When a new breakpoint is created, the second instruction of that breakpoint is patched with a trap instruction. This assumes the length of the instruction is always the same. In preparation for prefixed instructions, remove this assumption. Insert the trap instruction at the same time the first instruction is inserted. 
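After this change the whole breakpoint slot is laid down in one place;
schematically:

    /* Copy the original instruction into the slot... */
    patch_instruction(bp->instr, instr);
    /* ...and place the trap directly behind it, whatever its length. */
    patch_instruction((void *)bp->instr + ppc_inst_len(instr),
                      ppc_inst(bpinstr));
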
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Reviewed-by: Alistair Popple Link: https://lore.kernel.org/r/20200506034050.24806-20-jniethe5@gmail.com --- arch/powerpc/xmon/xmon.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index d8b29f6925be..00b24f357c2b 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -878,7 +878,6 @@ static struct bpt *new_breakpoint(unsigned long a) if (!bp->enabled && atomic_read(&bp->ref_count) == 0) { bp->address = a; bp->instr = (void *)(bpt_table + ((bp - bpts) * BPT_WORDS)); - patch_instruction(bp->instr + 1, ppc_inst(bpinstr)); return bp; } } @@ -910,6 +909,8 @@ static void insert_bpts(void) continue; } patch_instruction(bp->instr, instr); + patch_instruction((void *)bp->instr + ppc_inst_len(instr), + ppc_inst(bpinstr)); if (bp->enabled & BP_CIABR) continue; if (patch_instruction((struct ppc_inst *)bp->address, -- cgit v1.2.3-59-g8ed1b From 0b582db5490a1f250ef63337dd46d5c7599dae80 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:40 +1000 Subject: powerpc: Make test_translate_branch() independent of instruction length test_translate_branch() uses two pointers to instructions within a buffer, p and q, to test patch_branch(). The pointer arithmetic done on them assumes a size of 4. This will not work if the instruction length changes. Instead do the arithmetic relative to the void * to the buffer. Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Reviewed-by: Alistair Popple Link: https://lore.kernel.org/r/20200506034050.24806-21-jniethe5@gmail.com --- arch/powerpc/lib/code-patching.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 435fc8e9f45d..d946f7d6bb32 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -572,7 +572,7 @@ static void __init test_branch_bform(void) static void __init test_translate_branch(void) { unsigned long addr; - struct ppc_inst *p, *q; + void *p, *q; struct ppc_inst instr; void *buf; @@ -586,7 +586,7 @@ static void __init test_translate_branch(void) addr = (unsigned long)p; patch_branch(p, addr, 0); check(instr_is_branch_to_addr(p, addr)); - q = p + 1; + q = p + 4; translate_branch(&instr, q, p); patch_instruction(q, instr); check(instr_is_branch_to_addr(q, addr)); @@ -642,7 +642,7 @@ static void __init test_translate_branch(void) create_cond_branch(&instr, p, addr, 0); patch_instruction(p, instr); check(instr_is_branch_to_addr(p, addr)); - q = p + 1; + q = buf + 4; translate_branch(&instr, q, p); patch_instruction(q, instr); check(instr_is_branch_to_addr(q, addr)); -- cgit v1.2.3-59-g8ed1b From 2aa6195e43b3740258ead93aee42ac719dd4c4b0 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Wed, 6 May 2020 13:40:41 +1000 Subject: powerpc: Enable Prefixed Instructions Prefix instructions have their own FSCR bit which needs to enabled via a CPU feature. The kernel will save the FSCR for problem state but it needs to be enabled initially. If prefixed instructions are made unavailable by the [H]FSCR, attempting to use them will cause a facility unavailable exception. Add "PREFIX" to the facility_strings[]. Currently there are no prefixed instructions that are actually emulated by emulate_instruction() within facility_unavailable_exception(). However, when caused by a prefixed instructions the SRR1 PREFIXED bit is set. 
Prepare for dealing with emulated prefixed instructions by checking for this bit. Signed-off-by: Alistair Popple Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Reviewed-by: Nicholas Piggin Link: https://lore.kernel.org/r/20200506034050.24806-22-jniethe5@gmail.com --- arch/powerpc/include/asm/reg.h | 3 +++ arch/powerpc/kernel/traps.c | 1 + 2 files changed, 4 insertions(+) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index da5cab038e25..773f76402392 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -397,6 +397,7 @@ #define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */ /* HFSCR and FSCR bit numbers are the same */ +#define FSCR_PREFIX_LG 13 /* Enable Prefix Instructions */ #define FSCR_SCV_LG 12 /* Enable System Call Vectored */ #define FSCR_MSGP_LG 10 /* Enable MSGP */ #define FSCR_TAR_LG 8 /* Enable Target Address Register */ @@ -408,11 +409,13 @@ #define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */ #define FSCR_FP_LG 0 /* Enable Floating Point */ #define SPRN_FSCR 0x099 /* Facility Status & Control Register */ +#define FSCR_PREFIX __MASK(FSCR_PREFIX_LG) #define FSCR_SCV __MASK(FSCR_SCV_LG) #define FSCR_TAR __MASK(FSCR_TAR_LG) #define FSCR_EBB __MASK(FSCR_EBB_LG) #define FSCR_DSCR __MASK(FSCR_DSCR_LG) #define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */ +#define HFSCR_PREFIX __MASK(FSCR_PREFIX_LG) #define HFSCR_MSGP __MASK(FSCR_MSGP_LG) #define HFSCR_TAR __MASK(FSCR_TAR_LG) #define HFSCR_EBB __MASK(FSCR_EBB_LG) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 477befcda8d3..e37bf7945d27 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1737,6 +1737,7 @@ void facility_unavailable_exception(struct pt_regs *regs) [FSCR_TAR_LG] = "TAR", [FSCR_MSGP_LG] = "MSGP", [FSCR_SCV_LG] = "SCV", + [FSCR_PREFIX_LG] = "PREFIX", }; char *facility = "unknown"; u64 value; -- cgit v1.2.3-59-g8ed1b From b691505ef9232a6e82f1c160911afcb4cb20487b Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:42 +1000 Subject: powerpc: Define new SRR1 bits for a ISA v3.1 Add the BOUNDARY SRR1 bit definition for when the cause of an alignment exception is a prefixed instruction that crosses a 64-byte boundary. Add the PREFIXED SRR1 bit definition for exceptions caused by prefixed instructions. Bit 35 of SRR1 is called SRR1_ISI_N_OR_G. This name comes from it being used to indicate that an ISI was due to the access being no-exec or guarded. ISA v3.1 adds another purpose. It is also set if there is an access in a cache-inhibited location for prefixed instruction. Rename from SRR1_ISI_N_OR_G to SRR1_ISI_N_G_OR_CIP. 
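An interrupt handler consumes the new bits roughly like this (sketch
only; on book3s-64 the SRR1 image is saved in regs->msr on exception
entry, and the alignment-handler patch later in this series does
exactly this):

    unsigned long srr1 = regs->msr;

    /* A prefixed instruction is 8 bytes, a word instruction 4, so
     * step NIP by the true length after emulating it. */
    regs->nip += (srr1 & SRR1_PREFIXED) ? 8 : 4;

SRR1_BOUNDARY is tested analogously, to send SIGBUS when a prefixed
instruction straddles a 64-byte boundary.
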
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Reviewed-by: Alistair Popple Link: https://lore.kernel.org/r/20200506034050.24806-23-jniethe5@gmail.com --- arch/powerpc/include/asm/reg.h | 4 +++- arch/powerpc/kvm/book3s_hv_nested.c | 2 +- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 773f76402392..f95eb8f97756 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -762,7 +762,7 @@ #endif #define SRR1_ISI_NOPT 0x40000000 /* ISI: Not found in hash */ -#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ +#define SRR1_ISI_N_G_OR_CIP 0x10000000 /* ISI: Access is no-exec or G or CI for a prefixed instruction */ #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ #define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */ @@ -789,6 +789,8 @@ #define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */ #define SRR1_MCE_MCP 0x00080000 /* Machine check signal caused interrupt */ +#define SRR1_BOUNDARY 0x10000000 /* Prefixed instruction crosses 64-byte boundary */ +#define SRR1_PREFIXED 0x20000000 /* Exception caused by prefixed instruction */ #define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ #define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 99011f1b772a..66c38ee37fd5 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -1182,7 +1182,7 @@ static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu, } else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) { /* Can we execute? */ if (!gpte_p->may_execute) { - flags |= SRR1_ISI_N_OR_G; + flags |= SRR1_ISI_N_G_OR_CIP; goto forward_to_l1; } } else { diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 3b168c69d503..88da2764c1bb 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -1240,7 +1240,7 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr, status &= ~DSISR_NOHPTE; /* DSISR_NOHPTE == SRR1_ISI_NOPT */ if (!data) { if (gr & (HPTE_R_N | HPTE_R_G)) - return status | SRR1_ISI_N_OR_G; + return status | SRR1_ISI_N_G_OR_CIP; if (!hpte_read_permission(pp, slb_v & key)) return status | SRR1_ISI_PROT; } else if (status & DSISR_ISSTORE) { -- cgit v1.2.3-59-g8ed1b From 7a8818e0df5c6b53c89c7c928498668a2bbb3de0 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Fri, 15 May 2020 11:15:28 +1000 Subject: powerpc/optprobes: Add register argument to patch_imm64_load_insns() Currently patch_imm32_load_insns() is used to load an instruction to r4 to be emulated by emulate_step(). For prefixed instructions we would like to be able to load a 64bit immediate to r4. To prepare for this make patch_imm64_load_insns() take an argument that decides which register to load an immediate to - rather than hardcoding r3. 
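The follow-up patch that adds prefixed-instruction support uses the new
argument to stage the full 8-byte instruction image in r4 for
emulate_step():

    temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
    patch_imm64_load_insns(ppc_inst_val(temp) |
                           ((u64)ppc_inst_suffix(temp) << 32),
                           4, buff + TMPL_INSN_IDX);
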
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200516115449.4168796-1-mpe@ellerman.id.au --- arch/powerpc/kernel/optprobes.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 52c1ab3f85aa..8eea8dbb93fa 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -162,38 +162,38 @@ void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr) /* * Generate instructions to load provided immediate 64-bit value - * to register 'r3' and patch these instructions at 'addr'. + * to register 'reg' and patch these instructions at 'addr'. */ -void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr) +void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr) { - /* lis r3,(op)@highest */ + /* lis reg,(op)@highest */ patch_instruction((struct ppc_inst *)addr, - ppc_inst(PPC_INST_ADDIS | ___PPC_RT(3) | + ppc_inst(PPC_INST_ADDIS | ___PPC_RT(reg) | ((val >> 48) & 0xffff))); addr++; - /* ori r3,r3,(op)@higher */ + /* ori reg,reg,(op)@higher */ patch_instruction((struct ppc_inst *)addr, - ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | - ___PPC_RS(3) | ((val >> 32) & 0xffff))); + ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) | + ___PPC_RS(reg) | ((val >> 32) & 0xffff))); addr++; - /* rldicr r3,r3,32,31 */ + /* rldicr reg,reg,32,31 */ patch_instruction((struct ppc_inst *)addr, - ppc_inst(PPC_INST_RLDICR | ___PPC_RA(3) | - ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31))); + ppc_inst(PPC_INST_RLDICR | ___PPC_RA(reg) | + ___PPC_RS(reg) | __PPC_SH64(32) | __PPC_ME64(31))); addr++; - /* oris r3,r3,(op)@h */ + /* oris reg,reg,(op)@h */ patch_instruction((struct ppc_inst *)addr, - ppc_inst(PPC_INST_ORIS | ___PPC_RA(3) | - ___PPC_RS(3) | ((val >> 16) & 0xffff))); + ppc_inst(PPC_INST_ORIS | ___PPC_RA(reg) | + ___PPC_RS(reg) | ((val >> 16) & 0xffff))); addr++; - /* ori r3,r3,(op)@l */ + /* ori reg,reg,(op)@l */ patch_instruction((struct ppc_inst *)addr, - ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | - ___PPC_RS(3) | (val & 0xffff))); + ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) | + ___PPC_RS(reg) | (val & 0xffff))); } int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) @@ -249,7 +249,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) * Fixup the template with instructions to: * 1. load the address of the actual probepoint */ - patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX); + patch_imm64_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX); /* * 2. branch to optimized_callback() and emulate_step() -- cgit v1.2.3-59-g8ed1b From 650b55b707fdfa764e9f2b81314d3eb4216fb962 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Fri, 15 May 2020 12:12:55 +1000 Subject: powerpc: Add prefixed instructions to instruction data type For powerpc64, redefine the ppc_inst type so both word and prefixed instructions can be represented. On powerpc32 the type will remain the same. Update places which had assumed instructions to be 4 bytes long. Signed-off-by: Jordan Niethe Reviewed-by: Alistair Popple [mpe: Rework the get_user_inst() macros to be parameterised, and don't assign to the dest if an error occurred. Use CONFIG_PPC64 not __powerpc64__ in a few places. Address other comments from Christophe. Fix some sparse complaints.] 
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-24-jniethe5@gmail.com --- arch/powerpc/include/asm/inst.h | 70 ++++++++++++++++++++++++++++++++--- arch/powerpc/include/asm/kprobes.h | 2 +- arch/powerpc/include/asm/ppc-opcode.h | 3 ++ arch/powerpc/include/asm/uaccess.h | 36 ++++++++++++++++++ arch/powerpc/include/asm/uprobes.h | 2 +- arch/powerpc/kernel/crash_dump.c | 2 +- arch/powerpc/kernel/optprobes.c | 6 ++- arch/powerpc/kernel/optprobes_head.S | 3 ++ arch/powerpc/lib/code-patching.c | 13 ++++++- arch/powerpc/lib/feature-fixups.c | 5 ++- arch/powerpc/lib/inst.c | 40 ++++++++++++++++++++ arch/powerpc/lib/sstep.c | 4 +- arch/powerpc/xmon/xmon.c | 4 +- arch/powerpc/xmon/xmon_bpts.S | 2 + 14 files changed, 175 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index c7ea70e73073..d82e0c99cfa1 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -2,29 +2,80 @@ #ifndef _ASM_POWERPC_INST_H #define _ASM_POWERPC_INST_H +#include + /* * Instruction data type for POWER */ struct ppc_inst { u32 val; +#ifdef CONFIG_PPC64 + u32 suffix; +#endif } __packed; -#define ppc_inst(x) ((struct ppc_inst){ .val = x }) - static inline u32 ppc_inst_val(struct ppc_inst x) { return x.val; } -static inline int ppc_inst_len(struct ppc_inst x) +static inline int ppc_inst_primary_opcode(struct ppc_inst x) { - return sizeof(struct ppc_inst); + return ppc_inst_val(x) >> 26; } -static inline int ppc_inst_primary_opcode(struct ppc_inst x) +#ifdef CONFIG_PPC64 +#define ppc_inst(x) ((struct ppc_inst){ .val = (x), .suffix = 0xff }) + +#define ppc_inst_prefix(x, y) ((struct ppc_inst){ .val = (x), .suffix = (y) }) + +static inline u32 ppc_inst_suffix(struct ppc_inst x) { - return ppc_inst_val(x) >> 26; + return x.suffix; +} + +static inline bool ppc_inst_prefixed(struct ppc_inst x) +{ + return (ppc_inst_primary_opcode(x) == 1) && ppc_inst_suffix(x) != 0xff; +} + +static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x) +{ + return ppc_inst_prefix(swab32(ppc_inst_val(x)), + swab32(ppc_inst_suffix(x))); +} + +static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr) +{ + u32 val, suffix; + + val = *(u32 *)ptr; + if ((val >> 26) == OP_PREFIX) { + suffix = *((u32 *)ptr + 1); + return ppc_inst_prefix(val, suffix); + } else { + return ppc_inst(val); + } +} + +static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y) +{ + return *(u64 *)&x == *(u64 *)&y; +} + +#else + +#define ppc_inst(x) ((struct ppc_inst){ .val = x }) + +static inline bool ppc_inst_prefixed(struct ppc_inst x) +{ + return false; +} + +static inline u32 ppc_inst_suffix(struct ppc_inst x) +{ + return 0; } static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x) @@ -42,6 +93,13 @@ static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y) return ppc_inst_val(x) == ppc_inst_val(y); } +#endif /* CONFIG_PPC64 */ + +static inline int ppc_inst_len(struct ppc_inst x) +{ + return ppc_inst_prefixed(x) ? 
8 : 4; +} + int probe_user_read_inst(struct ppc_inst *inst, struct ppc_inst __user *nip); diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index 66b3f2983b22..4fc0e15e23a5 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -43,7 +43,7 @@ extern kprobe_opcode_t optprobe_template_ret[]; extern kprobe_opcode_t optprobe_template_end[]; /* Fixed instruction size for powerpc */ -#define MAX_INSN_SIZE 1 +#define MAX_INSN_SIZE 2 #define MAX_OPTIMIZED_LENGTH sizeof(kprobe_opcode_t) /* 4 bytes */ #define MAX_OPTINSN_SIZE (optprobe_template_end - optprobe_template_entry) #define RELATIVEJUMP_SIZE sizeof(kprobe_opcode_t) /* 4 bytes */ diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index c1df75edde44..2a39c716c343 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -158,6 +158,9 @@ /* VMX Vector Store Instructions */ #define OP_31_XOP_STVX 231 +/* Prefixed Instructions */ +#define OP_PREFIX 1 + #define OP_31 31 #define OP_LWZ 32 #define OP_STFS 52 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 0d2d0c3dc527..c0523efa1458 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -105,6 +105,40 @@ static inline int __access_ok(unsigned long addr, unsigned long size, #define __put_user_inatomic(x, ptr) \ __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#ifdef CONFIG_PPC64 + +#define ___get_user_instr(gu_op, dest, ptr) \ +({ \ + long __gui_ret = 0; \ + unsigned long __gui_ptr = (unsigned long)ptr; \ + struct ppc_inst __gui_inst; \ + unsigned int __prefix, __suffix; \ + __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \ + if (__gui_ret == 0) { \ + if ((__prefix >> 26) == OP_PREFIX) { \ + __gui_ret = gu_op(__suffix, \ + (unsigned int __user *)__gui_ptr + 1); \ + __gui_inst = ppc_inst_prefix(__prefix, \ + __suffix); \ + } else { \ + __gui_inst = ppc_inst(__prefix); \ + } \ + if (__gui_ret == 0) \ + (dest) = __gui_inst; \ + } \ + __gui_ret; \ +}) + +#define get_user_instr(x, ptr) \ + ___get_user_instr(get_user, x, ptr) + +#define __get_user_instr(x, ptr) \ + ___get_user_instr(__get_user, x, ptr) + +#define __get_user_instr_inatomic(x, ptr) \ + ___get_user_instr(__get_user_inatomic, x, ptr) + +#else /* !CONFIG_PPC64 */ #define get_user_instr(x, ptr) \ get_user((x).val, (u32 __user *)(ptr)) @@ -114,6 +148,8 @@ static inline int __access_ok(unsigned long addr, unsigned long size, #define __get_user_instr_inatomic(x, ptr) \ __get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32)) +#endif /* CONFIG_PPC64 */ + extern long __put_user_bad(void); /* diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h index 7e3b329ba2d3..5bf65f5d44a9 100644 --- a/arch/powerpc/include/asm/uprobes.h +++ b/arch/powerpc/include/asm/uprobes.h @@ -15,7 +15,7 @@ typedef ppc_opcode_t uprobe_opcode_t; -#define MAX_UINSN_BYTES 4 +#define MAX_UINSN_BYTES 8 #define UPROBE_XOL_SLOT_BYTES (MAX_UINSN_BYTES) /* The following alias is needed for reference from arch-agnostic code */ diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 72bafb47e757..735e89337398 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -46,7 +46,7 @@ static void __init create_trampoline(unsigned long addr) * two instructions it doesn't require any registers. 
*/ patch_instruction(p, ppc_inst(PPC_INST_NOP)); - patch_branch(++p, addr + PHYSICAL_START, 0); + patch_branch((void *)p + 4, addr + PHYSICAL_START, 0); } void __init setup_kdump_trampoline(void) diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 8eea8dbb93fa..3ac105e7faae 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -198,7 +198,7 @@ void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr) int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) { - struct ppc_inst branch_op_callback, branch_emulate_step; + struct ppc_inst branch_op_callback, branch_emulate_step, temp; kprobe_opcode_t *op_callback_addr, *emulate_step_addr, *buff; long b_offset; unsigned long nip, size; @@ -282,7 +282,9 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) /* * 3. load instruction to be emulated into relevant register, and */ - patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX); + temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn); + patch_imm64_load_insns(ppc_inst_val(temp) | ((u64)ppc_inst_suffix(temp) << 32), + 4, buff + TMPL_INSN_IDX); /* * 4. branch back from trampoline diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S index cf383520843f..ff8ba4d3824d 100644 --- a/arch/powerpc/kernel/optprobes_head.S +++ b/arch/powerpc/kernel/optprobes_head.S @@ -94,6 +94,9 @@ optprobe_template_insn: /* 2, Pass instruction to be emulated in r4 */ nop nop + nop + nop + nop .global optprobe_template_call_emulate optprobe_template_call_emulate: diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index d946f7d6bb32..e9a0ea1c7ba4 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -24,7 +24,18 @@ static int __patch_instruction(struct ppc_inst *exec_addr, struct ppc_inst instr { int err = 0; - __put_user_asm(ppc_inst_val(instr), patch_addr, err, "stw"); + if (!ppc_inst_prefixed(instr)) { + __put_user_asm(ppc_inst_val(instr), patch_addr, err, "stw"); + } else { +#ifdef CONFIG_CPU_LITTLE_ENDIAN + __put_user_asm((u64)ppc_inst_suffix(instr) << 32 | + ppc_inst_val(instr), patch_addr, err, "std"); +#else + __put_user_asm((u64)ppc_inst_val(instr) << 32 | + ppc_inst_suffix(instr), patch_addr, err, "std"); +#endif + } + if (err) return err; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 0c9ffdef8096..1fb845f60f43 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -84,12 +84,13 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur) src = alt_start; dest = start; - for (; src < alt_end; src++, dest++) { + for (; src < alt_end; src = (void *)src + ppc_inst_len(ppc_inst_read(src)), + (dest = (void *)dest + ppc_inst_len(ppc_inst_read(dest)))) { if (patch_alt_instruction(src, dest, alt_start, alt_end)) return 1; } - for (; dest < end; dest++) + for (; dest < end; dest = (void *)dest + ppc_inst_len(ppc_inst(PPC_INST_NOP))) raw_patch_instruction(dest, ppc_inst(PPC_INST_NOP)); return 0; diff --git a/arch/powerpc/lib/inst.c b/arch/powerpc/lib/inst.c index bf3126ee399d..aedfd6e31e53 100644 --- a/arch/powerpc/lib/inst.c +++ b/arch/powerpc/lib/inst.c @@ -4,8 +4,47 @@ */ #include +#include #include +#include +#ifdef CONFIG_PPC64 +int probe_user_read_inst(struct ppc_inst *inst, + struct ppc_inst __user *nip) +{ + unsigned int val, suffix; + int err; + + err = 
probe_user_read(&val, nip, sizeof(val)); + if (err) + return err; + if (get_op(val) == OP_PREFIX) { + err = probe_user_read(&suffix, (void __user *)nip + 4, 4); + *inst = ppc_inst_prefix(val, suffix); + } else { + *inst = ppc_inst(val); + } + return err; +} + +int probe_kernel_read_inst(struct ppc_inst *inst, + struct ppc_inst *src) +{ + unsigned int val, suffix; + int err; + + err = probe_kernel_read(&val, src, sizeof(val)); + if (err) + return err; + if (get_op(val) == OP_PREFIX) { + err = probe_kernel_read(&suffix, (void *)src + 4, 4); + *inst = ppc_inst_prefix(val, suffix); + } else { + *inst = ppc_inst(val); + } + return err; +} +#else /* !CONFIG_PPC64 */ int probe_user_read_inst(struct ppc_inst *inst, struct ppc_inst __user *nip) { @@ -31,3 +70,4 @@ int probe_kernel_read_inst(struct ppc_inst *inst, return err; } +#endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 95a56bb1ba3f..ecd756c346fd 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1169,10 +1169,12 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, unsigned long int imm; unsigned long int val, val2; unsigned int mb, me, sh; - unsigned int word; + unsigned int word, suffix; long ival; word = ppc_inst_val(instr); + suffix = ppc_inst_suffix(instr); + op->type = COMPUTE; opcode = ppc_inst_primary_opcode(instr); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 00b24f357c2b..ac8ccf333d51 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -758,8 +758,8 @@ static int xmon_bpt(struct pt_regs *regs) /* Are we at the trap at bp->instr[1] for some bp? */ bp = in_breakpoint_table(regs->nip, &offset); - if (bp != NULL && offset == 4) { - regs->nip = bp->address + 4; + if (bp != NULL && (offset == 4 || offset == 8)) { + regs->nip = bp->address + offset; atomic_dec(&bp->ref_count); return 1; } diff --git a/arch/powerpc/xmon/xmon_bpts.S b/arch/powerpc/xmon/xmon_bpts.S index f3ad0ab50854..69726814cd27 100644 --- a/arch/powerpc/xmon/xmon_bpts.S +++ b/arch/powerpc/xmon/xmon_bpts.S @@ -4,6 +4,8 @@ #include #include "xmon_bpts.h" +/* Prefixed instructions can not cross 64 byte boundaries */ +.align 6 .global bpt_table bpt_table: .space NBPTS * BPT_SIZE -- cgit v1.2.3-59-g8ed1b From f77f8ff7f13e6411c2e0ba25bb7e012a5ae6c927 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:44 +1000 Subject: powerpc: Test prefixed code patching Expand the code-patching self-tests to includes tests for patching prefixed instructions. 
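Condensed, the new self-test overwrites two word nops with an 8-byte
prefixed no-op image and verifies the result byte for byte (the test
symbols are defined in the new test_code-patching.S; the patched region
is 8 bytes here):

    __patch_instruction((struct ppc_inst *)code_patching_test1,
                        ppc_inst_prefix(OP_PREFIX << 26, 0x00000000),
                        (struct ppc_inst *)code_patching_test1);

    check(!memcmp(code_patching_test1, code_patching_test1_expected, 8));
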
Signed-off-by: Jordan Niethe [mpe: Use CONFIG_PPC64 not __powerpc64__] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-25-jniethe5@gmail.com --- arch/powerpc/lib/Makefile | 2 +- arch/powerpc/lib/code-patching.c | 21 +++++++++++++++++++++ arch/powerpc/lib/test_code-patching.S | 20 ++++++++++++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/lib/test_code-patching.S diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 546591848219..5e994cda8e40 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -16,7 +16,7 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING endif -obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o +obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o test_code-patching.o ifndef CONFIG_KASAN obj-y += string.o memcmp_$(BITS).o diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index e9a0ea1c7ba4..64cf621e5b00 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -708,6 +708,26 @@ static void __init test_translate_branch(void) vfree(buf); } +#ifdef CONFIG_PPC64 +static void __init test_prefixed_patching(void) +{ + extern unsigned int code_patching_test1[]; + extern unsigned int code_patching_test1_expected[]; + extern unsigned int end_code_patching_test1[]; + + __patch_instruction((struct ppc_inst *)code_patching_test1, + ppc_inst_prefix(OP_PREFIX << 26, 0x00000000), + (struct ppc_inst *)code_patching_test1); + + check(!memcmp(code_patching_test1, + code_patching_test1_expected, + sizeof(unsigned int) * + (end_code_patching_test1 - code_patching_test1))); +} +#else +static inline void test_prefixed_patching(void) {} +#endif + static int __init test_code_patching(void) { printk(KERN_DEBUG "Running code patching self-tests ...\n"); @@ -716,6 +736,7 @@ static int __init test_code_patching(void) test_branch_bform(); test_create_function_call(); test_translate_branch(); + test_prefixed_patching(); return 0; } diff --git a/arch/powerpc/lib/test_code-patching.S b/arch/powerpc/lib/test_code-patching.S new file mode 100644 index 000000000000..a9be6107844e --- /dev/null +++ b/arch/powerpc/lib/test_code-patching.S @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 IBM Corporation + */ +#include + + .text + +#define globl(x) \ + .globl x; \ +x: + +globl(code_patching_test1) + nop + nop +globl(end_code_patching_test1) + +globl(code_patching_test1_expected) + .long OP_PREFIX << 26 + .long 0x0000000 -- cgit v1.2.3-59-g8ed1b From 785b79d1e02873c2088ee1301154c66dace66ce5 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:45 +1000 Subject: powerpc: Test prefixed instructions in feature fixups Expand the feature-fixups self-tests to includes tests for prefixed instructions. 
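The key invariant the new cases pin down: when a feature section is
nopped out, an 8-byte prefixed instruction must be replaced by two word
nops, never one, since a single nop would leave a live suffix behind.
In the test's .S terms:

    /* before fixup */              /* after fixup, feature absent */
    .long OP_PREFIX << 26           nop
    .long 0x0000000                 nop
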
Signed-off-by: Jordan Niethe [mpe: Use CONFIG_PPC64 not __powerpc64__, add empty inlines] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-26-jniethe5@gmail.com --- arch/powerpc/lib/feature-fixups-test.S | 69 +++++++++++++++++++++++++++++++ arch/powerpc/lib/feature-fixups.c | 75 ++++++++++++++++++++++++++++++++++ 2 files changed, 144 insertions(+) diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S index b12168c2447a..480172fbd024 100644 --- a/arch/powerpc/lib/feature-fixups-test.S +++ b/arch/powerpc/lib/feature-fixups-test.S @@ -7,6 +7,7 @@ #include #include #include +#include .text @@ -791,3 +792,71 @@ globl(lwsync_fixup_test_expected_SYNC) 1: or 1,1,1 sync +globl(ftr_fixup_prefix1) + or 1,1,1 + .long OP_PREFIX << 26 + .long 0x0000000 + or 2,2,2 +globl(end_ftr_fixup_prefix1) + +globl(ftr_fixup_prefix1_orig) + or 1,1,1 + .long OP_PREFIX << 26 + .long 0x0000000 + or 2,2,2 + +globl(ftr_fixup_prefix1_expected) + or 1,1,1 + nop + nop + or 2,2,2 + +globl(ftr_fixup_prefix2) + or 1,1,1 + .long OP_PREFIX << 26 + .long 0x0000000 + or 2,2,2 +globl(end_ftr_fixup_prefix2) + +globl(ftr_fixup_prefix2_orig) + or 1,1,1 + .long OP_PREFIX << 26 + .long 0x0000000 + or 2,2,2 + +globl(ftr_fixup_prefix2_alt) + .long OP_PREFIX << 26 + .long 0x0000001 + +globl(ftr_fixup_prefix2_expected) + or 1,1,1 + .long OP_PREFIX << 26 + .long 0x0000001 + or 2,2,2 + +globl(ftr_fixup_prefix3) + or 1,1,1 + .long OP_PREFIX << 26 + .long 0x0000000 + or 2,2,2 + or 3,3,3 +globl(end_ftr_fixup_prefix3) + +globl(ftr_fixup_prefix3_orig) + or 1,1,1 + .long OP_PREFIX << 26 + .long 0x0000000 + or 2,2,2 + or 3,3,3 + +globl(ftr_fixup_prefix3_alt) + .long OP_PREFIX << 26 + .long 0x0000001 + nop + +globl(ftr_fixup_prefix3_expected) + or 1,1,1 + .long OP_PREFIX << 26 + .long 0x0000001 + nop + or 3,3,3 diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 1fb845f60f43..80f320c2e189 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -689,6 +689,78 @@ static void test_lwsync_macros(void) } } +#ifdef CONFIG_PPC64 +static void __init test_prefix_patching(void) +{ + extern unsigned int ftr_fixup_prefix1[]; + extern unsigned int end_ftr_fixup_prefix1[]; + extern unsigned int ftr_fixup_prefix1_orig[]; + extern unsigned int ftr_fixup_prefix1_expected[]; + int size = sizeof(unsigned int) * (end_ftr_fixup_prefix1 - ftr_fixup_prefix1); + + fixup.value = fixup.mask = 8; + fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix1 + 1); + fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix1 + 3); + fixup.alt_start_off = fixup.alt_end_off = 0; + + /* Sanity check */ + check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) == 0); + + patch_feature_section(0, &fixup); + check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_expected, size) == 0); + check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) != 0); +} + +static void __init test_prefix_alt_patching(void) +{ + extern unsigned int ftr_fixup_prefix2[]; + extern unsigned int end_ftr_fixup_prefix2[]; + extern unsigned int ftr_fixup_prefix2_orig[]; + extern unsigned int ftr_fixup_prefix2_expected[]; + extern unsigned int ftr_fixup_prefix2_alt[]; + int size = sizeof(unsigned int) * (end_ftr_fixup_prefix2 - ftr_fixup_prefix2); + + fixup.value = fixup.mask = 8; + fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix2 + 1); + fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix2 + 3); + fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix2_alt); + 
fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix2_alt + 2); + /* Sanity check */ + check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) == 0); + + patch_feature_section(0, &fixup); + check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_expected, size) == 0); + check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) != 0); +} + +static void __init test_prefix_word_alt_patching(void) +{ + extern unsigned int ftr_fixup_prefix3[]; + extern unsigned int end_ftr_fixup_prefix3[]; + extern unsigned int ftr_fixup_prefix3_orig[]; + extern unsigned int ftr_fixup_prefix3_expected[]; + extern unsigned int ftr_fixup_prefix3_alt[]; + int size = sizeof(unsigned int) * (end_ftr_fixup_prefix3 - ftr_fixup_prefix3); + + fixup.value = fixup.mask = 8; + fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix3 + 1); + fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix3 + 4); + fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix3_alt); + fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix3_alt + 3); + /* Sanity check */ + check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) == 0); + + patch_feature_section(0, &fixup); + check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_expected, size) == 0); + patch_feature_section(0, &fixup); + check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) != 0); +} +#else +static inline void test_prefix_patching(void) {} +static inline void test_prefix_alt_patching(void) {} +static inline void test_prefix_word_alt_patching(void) {} +#endif /* CONFIG_PPC64 */ + static int __init test_feature_fixups(void) { printk(KERN_DEBUG "Running feature fixup self-tests ...\n"); @@ -703,6 +775,9 @@ static int __init test_feature_fixups(void) test_cpu_macros(); test_fw_macros(); test_lwsync_macros(); + test_prefix_patching(); + test_prefix_alt_patching(); + test_prefix_word_alt_patching(); return 0; } -- cgit v1.2.3-59-g8ed1b From c9c831aebd8663d0129bbcee4d76be889f0627fe Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:46 +1000 Subject: powerpc/xmon: Don't allow breakpoints on suffixes Do not allow placing xmon breakpoints on the suffix of a prefix instruction. Signed-off-by: Jordan Niethe [mpe: Don't split printf strings across lines] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-27-jniethe5@gmail.com --- arch/powerpc/xmon/xmon.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index ac8ccf333d51..d1a79f9e0566 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -889,8 +889,8 @@ static struct bpt *new_breakpoint(unsigned long a) static void insert_bpts(void) { int i; - struct ppc_inst instr; - struct bpt *bp; + struct ppc_inst instr, instr2; + struct bpt *bp, *bp2; bp = bpts; for (i = 0; i < NBPTS; ++i, ++bp) { @@ -908,6 +908,29 @@ static void insert_bpts(void) bp->enabled = 0; continue; } + /* + * Check the address is not a suffix by looking for a prefix in + * front of it. + */ + if (mread_instr(bp->address - 4, &instr2) == 8) { + printf("Breakpoint at %lx is on the second word of a prefixed instruction, disabling it\n", + bp->address); + bp->enabled = 0; + continue; + } + /* + * We might still be a suffix - if the prefix has already been + * replaced by a breakpoint we won't catch it with the above + * test. 
+ */ + bp2 = at_breakpoint(bp->address - 4); + if (bp2 && ppc_inst_prefixed(ppc_inst_read(bp2->instr))) { + printf("Breakpoint at %lx is on the second word of a prefixed instruction, disabling it\n", + bp->address); + bp->enabled = 0; + continue; + } + patch_instruction(bp->instr, instr); patch_instruction((void *)bp->instr + ppc_inst_len(instr), ppc_inst(bpinstr)); -- cgit v1.2.3-59-g8ed1b From b4657f7650babc9bfb41ce875abe41b18604a105 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:47 +1000 Subject: powerpc/kprobes: Don't allow breakpoints on suffixes Do not allow inserting breakpoints on the suffix of a prefix instruction in kprobes. Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-28-jniethe5@gmail.com --- arch/powerpc/kernel/kprobes.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 33d54b091c70..227510df8c55 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -106,7 +106,9 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset) int arch_prepare_kprobe(struct kprobe *p) { int ret = 0; + struct kprobe *prev; struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr); + struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1)); if ((unsigned long)p->addr & 0x03) { printk("Attempt to register kprobe at an unaligned address\n"); @@ -114,6 +116,17 @@ int arch_prepare_kprobe(struct kprobe *p) } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) { printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n"); ret = -EINVAL; + } else if (ppc_inst_prefixed(prefix)) { + printk("Cannot register a kprobe on the second word of prefixed instruction\n"); + ret = -EINVAL; + } + preempt_disable(); + prev = get_kprobe(p->addr - 1); + preempt_enable_no_resched(); + if (prev && + ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)prev->ainsn.insn))) { + printk("Cannot register a kprobe on the second word of prefixed instruction\n"); + ret = -EINVAL; } /* insn must be on a special executable page on ppc64. This is -- cgit v1.2.3-59-g8ed1b From 9409d2f9dad2f0679d67dc24d8116dd3e837b035 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:48 +1000 Subject: powerpc: Support prefixed instructions in alignment handler If a prefixed instruction results in an alignment exception, the SRR1_PREFIXED bit is set. The handler attempts to emulate the responsible instruction and then increment the NIP past it. Use SRR1_PREFIXED to determine by how much the NIP should be incremented. Prefixed instructions are not permitted to cross 64-byte boundaries. If they do the alignment interrupt is invoked with SRR1 BOUNDARY bit set. If this occurs send a SIGBUS to the offending process if in user mode. If in kernel mode call bad_page_fault(). 
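Condensed, the handler's two new decisions are (REASON_PREFIXED and
REASON_BOUNDARY map onto the SRR1 bits on book3s-64 and are defined as
0 on platforms without prefixed instructions):

    unsigned long reason = get_reason(regs);

    if (reason & REASON_BOUNDARY) {
        /* Prefixed instruction crossed a 64-byte boundary. */
        sig = SIGBUS;
        code = BUS_ADRALN;
        goto bad;
    }

    /* On successful emulation, step NIP by the true length. */
    regs->nip += inst_length(reason);
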
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Reviewed-by: Alistair Popple Link: https://lore.kernel.org/r/20200506034050.24806-29-jniethe5@gmail.com --- arch/powerpc/kernel/traps.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index e37bf7945d27..051d7028e71f 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -588,6 +588,8 @@ static inline int check_io_access(struct pt_regs *regs) #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) #define REASON_PRIVILEGED ESR_PPR #define REASON_TRAP ESR_PTR +#define REASON_PREFIXED 0 +#define REASON_BOUNDARY 0 /* single-step stuff */ #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) @@ -602,12 +604,16 @@ static inline int check_io_access(struct pt_regs *regs) #define REASON_ILLEGAL SRR1_PROGILL #define REASON_PRIVILEGED SRR1_PROGPRIV #define REASON_TRAP SRR1_PROGTRAP +#define REASON_PREFIXED SRR1_PREFIXED +#define REASON_BOUNDARY SRR1_BOUNDARY #define single_stepping(regs) ((regs)->msr & MSR_SE) #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) #define clear_br_trace(regs) ((regs)->msr &= ~MSR_BE) #endif +#define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4) + #if defined(CONFIG_E500) int machine_check_e500mc(struct pt_regs *regs) { @@ -1610,11 +1616,20 @@ void alignment_exception(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); int sig, code, fixed = 0; + unsigned long reason; /* We restore the interrupt state now */ if (!arch_irq_disabled_regs(regs)) local_irq_enable(); + reason = get_reason(regs); + + if (reason & REASON_BOUNDARY) { + sig = SIGBUS; + code = BUS_ADRALN; + goto bad; + } + if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) goto bail; @@ -1623,7 +1638,8 @@ void alignment_exception(struct pt_regs *regs) fixed = fix_alignment(regs); if (fixed == 1) { - regs->nip += 4; /* skip over emulated instruction */ + /* skip over emulated instruction */ + regs->nip += inst_length(reason); emulate_single_step(regs); goto bail; } @@ -1636,6 +1652,7 @@ void alignment_exception(struct pt_regs *regs) sig = SIGBUS; code = BUS_ADRALN; } +bad: if (user_mode(regs)) _exception(sig, regs, code, regs->dar); else -- cgit v1.2.3-59-g8ed1b From 50b80a12e4ccff46d53b93754d817acd98bc9ae0 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:49 +1000 Subject: powerpc sstep: Add support for prefixed load/stores This adds emulation support for the following prefixed integer load/stores: * Prefixed Load Byte and Zero (plbz) * Prefixed Load Halfword and Zero (plhz) * Prefixed Load Halfword Algebraic (plha) * Prefixed Load Word and Zero (plwz) * Prefixed Load Word Algebraic (plwa) * Prefixed Load Doubleword (pld) * Prefixed Store Byte (pstb) * Prefixed Store Halfword (psth) * Prefixed Store Word (pstw) * Prefixed Store Doubleword (pstd) * Prefixed Load Quadword (plq) * Prefixed Store Quadword (pstq) the follow prefixed floating-point load/stores: * Prefixed Load Floating-Point Single (plfs) * Prefixed Load Floating-Point Double (plfd) * Prefixed Store Floating-Point Single (pstfs) * Prefixed Store Floating-Point Double (pstfd) and for the following prefixed VSX load/stores: * Prefixed Load VSX Scalar Doubleword (plxsd) * Prefixed Load VSX Scalar Single-Precision (plxssp) * Prefixed Load VSX Vector [0|1] (plxv, plxv0, plxv1) * Prefixed Store VSX Scalar Doubleword (pstxsd) * Prefixed Store VSX Scalar Single-Precision (pstxssp) * Prefixed Store VSX 
Vector [0|1] (pstxv, pstxv0, pstxv1) Signed-off-by: Jordan Niethe Reviewed-by: Balamuruhan S [mpe: Use CONFIG_PPC64 not __powerpc64__, use get_op()] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-30-jniethe5@gmail.com --- arch/powerpc/include/asm/sstep.h | 4 + arch/powerpc/lib/sstep.c | 164 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 166 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h index c3ce903ac488..9b200a5f8794 100644 --- a/arch/powerpc/include/asm/sstep.h +++ b/arch/powerpc/include/asm/sstep.h @@ -90,11 +90,15 @@ enum instruction_type { #define VSX_LDLEFT 4 /* load VSX register from left */ #define VSX_CHECK_VEC 8 /* check MSR_VEC not MSR_VSX for reg >= 32 */ +/* Prefixed flag, ORed in with type */ +#define PREFIXED 0x800 + /* Size field in type word */ #define SIZE(n) ((n) << 12) #define GETSIZE(w) ((w) >> 12) #define GETTYPE(t) ((t) & INSTR_TYPE_MASK) +#define GETLENGTH(t) (((t) & PREFIXED) ? 8 : 4) #define MKOP(t, f, s) ((t) | (f) | SIZE(s)) diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index ecd756c346fd..6549baa2ec03 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -13,6 +13,7 @@ #include #include #include +#include extern char system_call_common[]; @@ -187,6 +188,44 @@ static nokprobe_inline unsigned long xform_ea(unsigned int instr, return ea; } +/* + * Calculate effective address for a MLS:D-form / 8LS:D-form + * prefixed instruction + */ +static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr, + unsigned int suffix, + const struct pt_regs *regs) +{ + int ra, prefix_r; + unsigned int dd; + unsigned long ea, d0, d1, d; + + prefix_r = instr & (1ul << 20); + ra = (suffix >> 16) & 0x1f; + + d0 = instr & 0x3ffff; + d1 = suffix & 0xffff; + d = (d0 << 16) | d1; + + /* + * sign extend a 34 bit number + */ + dd = (unsigned int)(d >> 2); + ea = (signed int)dd; + ea = (ea << 2) | (d & 0x3); + + if (!prefix_r && ra) + ea += regs->gpr[ra]; + else if (!prefix_r && !ra) + ; /* Leave ea as is */ + else if (prefix_r && !ra) + ea += regs->nip; + else if (prefix_r && ra) + ; /* Invalid form. Should already be checked for by caller! */ + + return ea; +} + /* * Return the largest power of 2, not greater than sizeof(unsigned long), * such that x is a multiple of it. 
@@ -1165,6 +1204,9 @@ static nokprobe_inline int trap_compare(long v1, long v2) int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, struct ppc_inst instr) { +#ifdef CONFIG_PPC64 + unsigned int suffixopcode, prefixtype, prefix_r; +#endif unsigned int opcode, ra, rb, rc, rd, spr, u; unsigned long int imm; unsigned long int val, val2; @@ -2652,6 +2694,124 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; } break; + case 1: /* Prefixed instructions */ + prefix_r = word & (1ul << 20); + ra = (suffix >> 16) & 0x1f; + op->update_reg = ra; + rd = (suffix >> 21) & 0x1f; + op->reg = rd; + op->val = regs->gpr[rd]; + + suffixopcode = get_op(suffix); + prefixtype = (word >> 24) & 0x3; + switch (prefixtype) { + case 0: /* Type 00 Eight-Byte Load/Store */ + if (prefix_r && ra) + break; + op->ea = mlsd_8lsd_ea(word, suffix, regs); + switch (suffixopcode) { + case 41: /* plwa */ + op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4); + break; + case 42: /* plxsd */ + op->reg = rd + 32; + op->type = MKOP(LOAD_VSX, PREFIXED, 8); + op->element_size = 8; + op->vsx_flags = VSX_CHECK_VEC; + break; + case 43: /* plxssp */ + op->reg = rd + 32; + op->type = MKOP(LOAD_VSX, PREFIXED, 4); + op->element_size = 8; + op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; + break; + case 46: /* pstxsd */ + op->reg = rd + 32; + op->type = MKOP(STORE_VSX, PREFIXED, 8); + op->element_size = 8; + op->vsx_flags = VSX_CHECK_VEC; + break; + case 47: /* pstxssp */ + op->reg = rd + 32; + op->type = MKOP(STORE_VSX, PREFIXED, 4); + op->element_size = 8; + op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; + break; + case 51: /* plxv1 */ + op->reg += 32; + fallthrough; + case 50: /* plxv0 */ + op->type = MKOP(LOAD_VSX, PREFIXED, 16); + op->element_size = 16; + op->vsx_flags = VSX_CHECK_VEC; + break; + case 55: /* pstxv1 */ + op->reg = rd + 32; + fallthrough; + case 54: /* pstxv0 */ + op->type = MKOP(STORE_VSX, PREFIXED, 16); + op->element_size = 16; + op->vsx_flags = VSX_CHECK_VEC; + break; + case 56: /* plq */ + op->type = MKOP(LOAD, PREFIXED, 16); + break; + case 57: /* pld */ + op->type = MKOP(LOAD, PREFIXED, 8); + break; + case 60: /* stq */ + op->type = MKOP(STORE, PREFIXED, 16); + break; + case 61: /* pstd */ + op->type = MKOP(STORE, PREFIXED, 8); + break; + } + break; + case 1: /* Type 01 Eight-Byte Register-to-Register */ + break; + case 2: /* Type 10 Modified Load/Store */ + if (prefix_r && ra) + break; + op->ea = mlsd_8lsd_ea(word, suffix, regs); + switch (suffixopcode) { + case 32: /* plwz */ + op->type = MKOP(LOAD, PREFIXED, 4); + break; + case 34: /* plbz */ + op->type = MKOP(LOAD, PREFIXED, 1); + break; + case 36: /* pstw */ + op->type = MKOP(STORE, PREFIXED, 4); + break; + case 38: /* pstb */ + op->type = MKOP(STORE, PREFIXED, 1); + break; + case 40: /* plhz */ + op->type = MKOP(LOAD, PREFIXED, 2); + break; + case 42: /* plha */ + op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2); + break; + case 44: /* psth */ + op->type = MKOP(STORE, PREFIXED, 2); + break; + case 48: /* plfs */ + op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4); + break; + case 50: /* plfd */ + op->type = MKOP(LOAD_FP, PREFIXED, 8); + break; + case 52: /* pstfs */ + op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4); + break; + case 54: /* pstfd */ + op->type = MKOP(STORE_FP, PREFIXED, 8); + break; + } + break; + case 3: /* Type 11 Modified Register-to-Register */ + break; + } #endif /* __powerpc64__ */ } @@ -2760,7 +2920,7 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) { unsigned long next_pc; - 
next_pc = truncate_if_32bit(regs->msr, regs->nip + 4); + next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type)); switch (GETTYPE(op->type)) { case COMPUTE: if (op->type & SETREG) @@ -3205,7 +3365,7 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr) return 0; instr_done: - regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); + regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)); return 1; } NOKPROBE_SYMBOL(emulate_step); -- cgit v1.2.3-59-g8ed1b From 3920742b92f5ea19a220edb947b6f33c99f501da Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:50 +1000 Subject: powerpc sstep: Add support for prefixed fixed-point arithmetic This adds emulation support for the following prefixed Fixed-Point Arithmetic instructions: * Prefixed Add Immediate (paddi) Signed-off-by: Jordan Niethe Reviewed-by: Balamuruhan S [mpe: Squash in get_op() usage] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-31-jniethe5@gmail.com --- arch/powerpc/lib/sstep.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 6549baa2ec03..5abe98216dc2 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1338,6 +1338,26 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, switch (opcode) { #ifdef __powerpc64__ + case 1: + prefix_r = word & (1ul << 20); + ra = (suffix >> 16) & 0x1f; + rd = (suffix >> 21) & 0x1f; + op->reg = rd; + op->val = regs->gpr[rd]; + suffixopcode = get_op(suffix); + prefixtype = (word >> 24) & 0x3; + switch (prefixtype) { + case 2: + if (prefix_r && ra) + return 0; + switch (suffixopcode) { + case 14: /* paddi */ + op->type = COMPUTE | PREFIXED; + op->val = mlsd_8lsd_ea(word, suffix, regs); + goto compute_done; + } + } + break; case 2: /* tdi */ if (rd & trap_compare(regs->gpr[ra], (short) word)) goto trap; -- cgit v1.2.3-59-g8ed1b From 09f82b063aa9c248a3ef919aeec361054e7b044a Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:26 +0530 Subject: powerpc/watchpoint: Rename current DAWR macros Power10 is introducing second DAWR. 
Use real register names from ISA for current macros: s/SPRN_DAWR/SPRN_DAWR0/ s/SPRN_DAWRX/SPRN_DAWRX0/ Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-2-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/reg.h | 4 ++-- arch/powerpc/kernel/dawr.c | 4 ++-- arch/powerpc/kvm/book3s_hv.c | 12 ++++++------ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 18 +++++++++--------- arch/powerpc/xmon/xmon.c | 2 +- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index f95eb8f97756..60a21b6b2057 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -283,14 +283,14 @@ #define CTRL_CT1 0x40000000 /* thread 1 */ #define CTRL_TE 0x00c00000 /* thread enable */ #define CTRL_RUNLATCH 0x1 -#define SPRN_DAWR 0xB4 +#define SPRN_DAWR0 0xB4 #define SPRN_RPR 0xBA /* Relative Priority Register */ #define SPRN_CIABR 0xBB #define CIABR_PRIV 0x3 #define CIABR_PRIV_USER 1 #define CIABR_PRIV_SUPER 2 #define CIABR_PRIV_HYPER 3 -#define SPRN_DAWRX 0xBC +#define SPRN_DAWRX0 0xBC #define DAWRX_USER __MASK(0) #define DAWRX_KERNEL __MASK(1) #define DAWRX_HYP __MASK(2) diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c index cc14aa6c4a1b..e91b613bf137 100644 --- a/arch/powerpc/kernel/dawr.c +++ b/arch/powerpc/kernel/dawr.c @@ -39,8 +39,8 @@ int set_dawr(struct arch_hw_breakpoint *brk) if (ppc_md.set_dawr) return ppc_md.set_dawr(dawr, dawrx); - mtspr(SPRN_DAWR, dawr); - mtspr(SPRN_DAWRX, dawrx); + mtspr(SPRN_DAWR0, dawr); + mtspr(SPRN_DAWRX0, dawrx); return 0; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 93493f0cbfe8..db07199f0977 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3392,8 +3392,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, int trap; unsigned long host_hfscr = mfspr(SPRN_HFSCR); unsigned long host_ciabr = mfspr(SPRN_CIABR); - unsigned long host_dawr = mfspr(SPRN_DAWR); - unsigned long host_dawrx = mfspr(SPRN_DAWRX); + unsigned long host_dawr = mfspr(SPRN_DAWR0); + unsigned long host_dawrx = mfspr(SPRN_DAWRX0); unsigned long host_psscr = mfspr(SPRN_PSSCR); unsigned long host_pidr = mfspr(SPRN_PID); @@ -3422,8 +3422,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, mtspr(SPRN_SPURR, vcpu->arch.spurr); if (dawr_enabled()) { - mtspr(SPRN_DAWR, vcpu->arch.dawr); - mtspr(SPRN_DAWRX, vcpu->arch.dawrx); + mtspr(SPRN_DAWR0, vcpu->arch.dawr); + mtspr(SPRN_DAWRX0, vcpu->arch.dawrx); } mtspr(SPRN_CIABR, vcpu->arch.ciabr); mtspr(SPRN_IC, vcpu->arch.ic); @@ -3475,8 +3475,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG)); mtspr(SPRN_HFSCR, host_hfscr); mtspr(SPRN_CIABR, host_ciabr); - mtspr(SPRN_DAWR, host_dawr); - mtspr(SPRN_DAWRX, host_dawrx); + mtspr(SPRN_DAWR0, host_dawr); + mtspr(SPRN_DAWRX0, host_dawrx); mtspr(SPRN_PID, host_pidr); /* diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 780a499c7114..70de3325d0e9 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -707,8 +707,8 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) BEGIN_FTR_SECTION mfspr r5, SPRN_CIABR - mfspr r6, SPRN_DAWR - mfspr r7, SPRN_DAWRX + mfspr r6, SPRN_DAWR0 + mfspr r7, SPRN_DAWRX0 mfspr r8, SPRN_IAMR 
std r5, STACK_SLOT_CIABR(r1) std r6, STACK_SLOT_DAWR(r1) @@ -803,8 +803,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) beq 1f ld r5, VCPU_DAWR(r4) ld r6, VCPU_DAWRX(r4) - mtspr SPRN_DAWR, r5 - mtspr SPRN_DAWRX, r6 + mtspr SPRN_DAWR0, r5 + mtspr SPRN_DAWRX0, r6 1: ld r7, VCPU_CIABR(r4) ld r8, VCPU_TAR(r4) @@ -1766,8 +1766,8 @@ BEGIN_FTR_SECTION * If the DAWR doesn't work, it's ok to write these here as * this value should always be zero */ - mtspr SPRN_DAWR, r6 - mtspr SPRN_DAWRX, r7 + mtspr SPRN_DAWR0, r6 + mtspr SPRN_DAWRX0, r7 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) BEGIN_FTR_SECTION ld r5, STACK_SLOT_TID(r1) @@ -2577,8 +2577,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) mfmsr r6 andi. r6, r6, MSR_DR /* in real mode? */ bne 4f - mtspr SPRN_DAWR, r4 - mtspr SPRN_DAWRX, r5 + mtspr SPRN_DAWR0, r4 + mtspr SPRN_DAWRX0, r5 4: li r3, 0 blr @@ -3329,7 +3329,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) mtspr SPRN_AMR, r0 mtspr SPRN_IAMR, r0 mtspr SPRN_CIABR, r0 - mtspr SPRN_DAWRX, r0 + mtspr SPRN_DAWRX0, r0 BEGIN_MMU_FTR_SECTION b 4f diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index d1a79f9e0566..effb10c2e32f 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1956,7 +1956,7 @@ static void dump_207_sprs(void) printf("hfscr = %.16lx dhdes = %.16lx rpr = %.16lx\n", mfspr(SPRN_HFSCR), mfspr(SPRN_DHDES), mfspr(SPRN_RPR)); printf("dawr = %.16lx dawrx = %.16lx ciabr = %.16lx\n", - mfspr(SPRN_DAWR), mfspr(SPRN_DAWRX), mfspr(SPRN_CIABR)); + mfspr(SPRN_DAWR0), mfspr(SPRN_DAWRX0), mfspr(SPRN_CIABR)); #endif } -- cgit v1.2.3-59-g8ed1b From 4a4ec2289a5d748cb64ff67ca8d74535a76a8436 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:27 +0530 Subject: powerpc/watchpoint: Add SPRN macros for second DAWR Power10 is introducing a second DAWR. Add SPRN_ macros for it. Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-3-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/reg.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 60a21b6b2057..054f8a71d686 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -284,6 +284,7 @@ #define CTRL_TE 0x00c00000 /* thread enable */ #define CTRL_RUNLATCH 0x1 #define SPRN_DAWR0 0xB4 +#define SPRN_DAWR1 0xB5 #define SPRN_RPR 0xBA /* Relative Priority Register */ #define SPRN_CIABR 0xBB #define CIABR_PRIV 0x3 @@ -291,6 +292,7 @@ #define CIABR_PRIV_SUPER 2 #define CIABR_PRIV_HYPER 3 #define SPRN_DAWRX0 0xBC +#define SPRN_DAWRX1 0xBD #define DAWRX_USER __MASK(0) #define DAWRX_KERNEL __MASK(1) #define DAWRX_HYP __MASK(2) -- cgit v1.2.3-59-g8ed1b From a6ba44e8799230e36c8ab06fda7f77f421e9e795 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:28 +0530 Subject: powerpc/watchpoint: Introduce function to get nr watchpoints dynamically So far we have had only one watchpoint, so HBP_NUM was hardcoded to 1. But Power10 is introducing a second DAWR, and thus the kernel should be able to dynamically find the actual number of watchpoints supported by the hardware it's running on. Introduce a function for that. Also convert the HBP_NUM macro to HBP_NUM_MAX, which will now represent the maximum number of watchpoints supported on powerpc.
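Illustrative sketch (editorial note, not part of the patch): nr_wp_slots() starts out as a constant, but the split from HBP_NUM_MAX is what later allows a runtime probe, along the lines of the following (the CPU_FTR_DAWR1 feature test here is a hypothetical placeholder, not something this series defines).

	/* Sketch only: a possible future dynamic count, capped by HBP_NUM_MAX. */
	static inline int nr_wp_slots(void)
	{
		return cpu_has_feature(CPU_FTR_DAWR1) ? 2 : 1;
	}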
Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-4-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/cputable.h | 6 +++++- arch/powerpc/include/asm/hw_breakpoint.h | 5 +++++ arch/powerpc/include/asm/processor.h | 2 +- arch/powerpc/kernel/hw_breakpoint.c | 2 +- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 40a4d3c6fd99..c67b94f3334c 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -614,7 +614,11 @@ enum { }; #endif /* __powerpc64__ */ -#define HBP_NUM 1 +/* + * Maximum number of hw breakpoint supported on powerpc. Number of + * breakpoints supported by actual hw might be less than this. + */ +#define HBP_NUM_MAX 1 #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index f2f8d8aa8e3b..518b41eef924 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -43,6 +43,11 @@ struct arch_hw_breakpoint { #define DABR_MAX_LEN 8 #define DAWR_MAX_LEN 512 +static inline int nr_wp_slots(void) +{ + return HBP_NUM_MAX; +} + #ifdef CONFIG_HAVE_HW_BREAKPOINT #include #include diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 5ab202055d5a..f209c5703ee2 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -180,7 +180,7 @@ struct thread_struct { int fpexc_mode; /* floating-point exception mode */ unsigned int align_ctl; /* alignment handling control */ #ifdef CONFIG_HAVE_HW_BREAKPOINT - struct perf_event *ptrace_bps[HBP_NUM]; + struct perf_event *ptrace_bps[HBP_NUM_MAX]; /* * Helps identify source of single-step exception and subsequent * hw-breakpoint enablement diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 423603c92c0f..01f07d91df70 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -39,7 +39,7 @@ static DEFINE_PER_CPU(struct perf_event *, bp_per_reg); int hw_breakpoint_slots(int type) { if (type == TYPE_DATA) - return HBP_NUM; + return nr_wp_slots(); return 0; /* no instruction breakpoints available */ } -- cgit v1.2.3-59-g8ed1b From 45093b382e0ac25c206b4dcd210c6be1f5e56e60 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:29 +0530 Subject: powerpc/watchpoint/ptrace: Return actual num of available watchpoints Userspace can ask for the number of available watchpoints (dbginfo.num_data_bps) using ptrace(PPC_PTRACE_GETHWDBGINFO). Return the actual number of available watchpoints on the machine rather than a hardcoded 1.
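Illustrative sketch (editorial note, not part of the patch): a debugger can observe the new count from userspace roughly as follows, using the existing powerpc ptrace UAPI.

	/* Sketch only: query the data-watchpoint count of a traced child. */
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <asm/ptrace.h>	/* struct ppc_debug_info, PPC_PTRACE_GETHWDBGINFO */

	static int query_num_watchpoints(pid_t child)
	{
		struct ppc_debug_info dbginfo;

		if (ptrace(PPC_PTRACE_GETHWDBGINFO, child, NULL, &dbginfo) < 0)
			return -1;
		return dbginfo.num_data_bps;	/* nr_wp_slots(), no longer always 1 */
	}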
Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-5-ravi.bangoria@linux.ibm.com --- arch/powerpc/kernel/ptrace/ptrace-noadv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c index f87e7c5c3bf3..12962302d6a4 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c +++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c @@ -44,7 +44,7 @@ void ppc_gethwdinfo(struct ppc_debug_info *dbginfo) dbginfo->version = 1; dbginfo->num_instruction_bps = 0; if (ppc_breakpoint_available()) - dbginfo->num_data_bps = 1; + dbginfo->num_data_bps = nr_wp_slots(); else dbginfo->num_data_bps = 0; dbginfo->num_condition_regs = 0; -- cgit v1.2.3-59-g8ed1b From a18b834625d345bfa89c4e2754dd6cbb0133c4d7 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:30 +0530 Subject: powerpc/watchpoint: Provide DAWR number to set_dawr Introduce a new parameter 'nr' to set_dawr() which indicates which DAWR should be programmed. Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-6-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/hw_breakpoint.h | 4 ++-- arch/powerpc/kernel/dawr.c | 15 ++++++++++----- arch/powerpc/kernel/process.c | 2 +- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 518b41eef924..5b3b02834e0b 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -104,10 +104,10 @@ static inline bool dawr_enabled(void) { return dawr_force_enable; } -int set_dawr(struct arch_hw_breakpoint *brk); +int set_dawr(int nr, struct arch_hw_breakpoint *brk); #else static inline bool dawr_enabled(void) { return false; } -static inline int set_dawr(struct arch_hw_breakpoint *brk) { return -1; } +static inline int set_dawr(int nr, struct arch_hw_breakpoint *brk) { return -1; } #endif #endif /* __KERNEL__ */ diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c index e91b613bf137..8114ad3a8574 100644 --- a/arch/powerpc/kernel/dawr.c +++ b/arch/powerpc/kernel/dawr.c @@ -16,7 +16,7 @@ bool dawr_force_enable; EXPORT_SYMBOL_GPL(dawr_force_enable); -int set_dawr(struct arch_hw_breakpoint *brk) +int set_dawr(int nr, struct arch_hw_breakpoint *brk) { unsigned long dawr, dawrx, mrd; @@ -39,15 +39,20 @@ int set_dawr(struct arch_hw_breakpoint *brk) if (ppc_md.set_dawr) return ppc_md.set_dawr(dawr, dawrx); - mtspr(SPRN_DAWR0, dawr); - mtspr(SPRN_DAWRX0, dawrx); + if (nr == 0) { + mtspr(SPRN_DAWR0, dawr); + mtspr(SPRN_DAWRX0, dawrx); + } else { + mtspr(SPRN_DAWR1, dawr); + mtspr(SPRN_DAWRX1, dawrx); + } return 0; } static void set_dawr_cb(void *info) { - set_dawr(info); + set_dawr(0, info); } static ssize_t dawr_write_file_bool(struct file *file, @@ -60,7 +65,7 @@ static ssize_t dawr_write_file_bool(struct file *file, /* Send error to user if they hypervisor won't allow us to write DAWR */ if (!dawr_force_enable && firmware_has_feature(FW_FEATURE_LPAR) && - set_dawr(&null_brk) != H_SUCCESS) + set_dawr(0, &null_brk) != H_SUCCESS) return -ENODEV; rc = debugfs_write_file_bool(file, user_buf, count, ppos); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index db766252238f..dc161b0adc82 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@
-806,7 +806,7 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk) if (dawr_enabled()) // Power8 or later - set_dawr(brk); + set_dawr(0, brk); else if (IS_ENABLED(CONFIG_PPC_8xx)) set_breakpoint_8xx(brk); else if (!cpu_has_feature(CPU_FTR_ARCH_207S)) -- cgit v1.2.3-59-g8ed1b From 4a8a9379f2af4c9928529b3959bc2d8f7023c6bc Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:31 +0530 Subject: powerpc/watchpoint: Provide DAWR number to __set_breakpoint Introduce a new parameter 'nr' to __set_breakpoint() which indicates which DAWR should be programmed. Also convert the current_brk variable to an array. Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-7-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/debug.h | 2 +- arch/powerpc/include/asm/hw_breakpoint.h | 2 +- arch/powerpc/kernel/hw_breakpoint.c | 8 ++++---- arch/powerpc/kernel/process.c | 14 +++++++------- arch/powerpc/kernel/signal.c | 2 +- arch/powerpc/xmon/xmon.c | 2 +- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h index 7756026b95ca..ec57daf87f40 100644 --- a/arch/powerpc/include/asm/debug.h +++ b/arch/powerpc/include/asm/debug.h @@ -45,7 +45,7 @@ static inline int debugger_break_match(struct pt_regs *regs) { return 0; } static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } #endif -void __set_breakpoint(struct arch_hw_breakpoint *brk); +void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk); bool ppc_breakpoint_available(void); #ifdef CONFIG_PPC_ADV_DEBUG_REGS extern void do_send_trap(struct pt_regs *regs, unsigned long address, diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 5b3b02834e0b..1120c7d9db58 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -85,7 +85,7 @@ static inline void hw_breakpoint_disable(void) brk.len = 0; brk.hw_len = 0; if (ppc_breakpoint_available()) - __set_breakpoint(&brk); + __set_breakpoint(0, &brk); } extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); int hw_breakpoint_handler(struct die_args *args); diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 01f07d91df70..f5472402c06d 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -64,7 +64,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) * If so, DABR will be populated in single_step_dabr_instruction().
*/ if (current->thread.last_hit_ubp != bp) - __set_breakpoint(info); + __set_breakpoint(0, info); return 0; } @@ -222,7 +222,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs) info = counter_arch_bp(tsk->thread.last_hit_ubp); regs->msr &= ~MSR_SE; - __set_breakpoint(info); + __set_breakpoint(0, info); tsk->thread.last_hit_ubp = NULL; } @@ -347,7 +347,7 @@ int hw_breakpoint_handler(struct die_args *args) if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) perf_bp_event(bp, regs); - __set_breakpoint(info); + __set_breakpoint(0, info); out: rcu_read_unlock(); return rc; @@ -380,7 +380,7 @@ static int single_step_dabr_instruction(struct die_args *args) if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) perf_bp_event(bp, regs); - __set_breakpoint(info); + __set_breakpoint(0, info); current->thread.last_hit_ubp = NULL; /* diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index dc161b0adc82..f303aea61794 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -637,7 +637,7 @@ void do_break (struct pt_regs *regs, unsigned long address, } #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ -static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk); +static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]); #ifdef CONFIG_PPC_ADV_DEBUG_REGS /* @@ -714,7 +714,7 @@ EXPORT_SYMBOL_GPL(switch_booke_debug_regs); static void set_breakpoint(struct arch_hw_breakpoint *brk) { preempt_disable(); - __set_breakpoint(brk); + __set_breakpoint(0, brk); preempt_enable(); } @@ -800,13 +800,13 @@ static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk) return 0; } -void __set_breakpoint(struct arch_hw_breakpoint *brk) +void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk) { - memcpy(this_cpu_ptr(¤t_brk), brk, sizeof(*brk)); + memcpy(this_cpu_ptr(¤t_brk[nr]), brk, sizeof(*brk)); if (dawr_enabled()) // Power8 or later - set_dawr(0, brk); + set_dawr(nr, brk); else if (IS_ENABLED(CONFIG_PPC_8xx)) set_breakpoint_8xx(brk); else if (!cpu_has_feature(CPU_FTR_ARCH_207S)) @@ -1174,8 +1174,8 @@ struct task_struct *__switch_to(struct task_struct *prev, * schedule DABR */ #ifndef CONFIG_HAVE_HW_BREAKPOINT - if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk))) - __set_breakpoint(&new->thread.hw_brk); + if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk[0]), &new->thread.hw_brk))) + __set_breakpoint(0, &new->thread.hw_brk); #endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index a46c3fdb6853..8e29138a344a 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -272,7 +272,7 @@ static void do_signal(struct task_struct *tsk) * triggered inside the kernel. 
*/ if (tsk->thread.hw_brk.address && tsk->thread.hw_brk.type) - __set_breakpoint(&tsk->thread.hw_brk); + __set_breakpoint(0, &tsk->thread.hw_brk); #endif /* Re-enable the breakpoints for the signal stack */ thread_change_pc(tsk, tsk->thread.regs); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index effb10c2e32f..30b3e3d99c0d 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -954,7 +954,7 @@ static void insert_cpu_bpts(void) brk.address = dabr.address; brk.type = (dabr.enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; brk.len = DABR_MAX_LEN; - __set_breakpoint(&brk); + __set_breakpoint(0, &brk); } if (iabr) -- cgit v1.2.3-59-g8ed1b From c2919132734f29a7a33e1339bef8a67b11f322eb Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:32 +0530 Subject: powerpc/watchpoint: Get watchpoint count dynamically while disabling them Instead of disabling only one watchpoint, get the number of available watchpoints dynamically and disable all of them. Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-8-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/hw_breakpoint.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 1120c7d9db58..d472b2eb757e 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -78,14 +78,14 @@ extern void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs); static inline void hw_breakpoint_disable(void) { - struct arch_hw_breakpoint brk; - - brk.address = 0; - brk.type = 0; - brk.len = 0; - brk.hw_len = 0; - if (ppc_breakpoint_available()) - __set_breakpoint(0, &brk); + int i; + struct arch_hw_breakpoint null_brk = {0}; + + if (!ppc_breakpoint_available()) + return; + + for (i = 0; i < nr_wp_slots(); i++) + __set_breakpoint(i, &null_brk); } extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); int hw_breakpoint_handler(struct die_args *args); -- cgit v1.2.3-59-g8ed1b From 22a214e461c5cc9428b86915d9cfcf84c6e11ad7 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:33 +0530 Subject: powerpc/watchpoint: Disable all available watchpoints when !dawr_force_enable Instead of disabling only the first watchpoint, disable all available watchpoints while clearing dawr_force_enable. The callback function is used only for disabling watchpoints, so rename it to disable_dawrs_cb(). And the null_brk parameter is not really required while disabling watchpoints, so remove it.
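Illustrative sketch (editorial note, not part of the patch): taken together with the previous patch, the disable path on each CPU becomes a loop over every slot, broadcast to other CPUs via smp_call_function().

	/* Sketch only: condensed from the dawr.c hunks below. */
	static void disable_dawrs_cb(void *info)
	{
		struct arch_hw_breakpoint null_brk = {0};
		int i;

		for (i = 0; i < nr_wp_slots(); i++)
			set_dawr(i, &null_brk);	/* clear DAWR0, DAWR1, ... */
	}

	/* Call site when dawr_force_enable is cleared: */
	smp_call_function(disable_dawrs_cb, NULL, 0);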
Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-9-ravi.bangoria@linux.ibm.com --- arch/powerpc/kernel/dawr.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c index 8114ad3a8574..500f52fa4711 100644 --- a/arch/powerpc/kernel/dawr.c +++ b/arch/powerpc/kernel/dawr.c @@ -50,9 +50,13 @@ int set_dawr(int nr, struct arch_hw_breakpoint *brk) return 0; } -static void set_dawr_cb(void *info) +static void disable_dawrs_cb(void *info) { - set_dawr(0, info); + struct arch_hw_breakpoint null_brk = {0}; + int i; + + for (i = 0; i < nr_wp_slots(); i++) + set_dawr(i, &null_brk); } static ssize_t dawr_write_file_bool(struct file *file, @@ -74,7 +78,7 @@ static ssize_t dawr_write_file_bool(struct file *file, /* If we are clearing, make sure all CPUs have the DAWR cleared */ if (!dawr_force_enable) - smp_call_function(set_dawr_cb, &null_brk, 0); + smp_call_function(disable_dawrs_cb, NULL, 0); return rc; } -- cgit v1.2.3-59-g8ed1b From 303e6a9ddcdc168e92253c78cdb4bbe1e10d78b3 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:34 +0530 Subject: powerpc/watchpoint: Convert thread_struct->hw_brk to an array So far powerpc hw supported only one watchpoint. But Power10 is introducing 2nd DAWR. Convert thread_struct->hw_brk into an array. Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-10-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/processor.h | 2 +- arch/powerpc/kernel/process.c | 60 +++++++++++++++++++------------ arch/powerpc/kernel/ptrace/ptrace-noadv.c | 40 +++++++++++++++------ arch/powerpc/kernel/ptrace/ptrace32.c | 4 +-- arch/powerpc/kernel/signal.c | 13 ++++--- 5 files changed, 78 insertions(+), 41 deletions(-) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index f209c5703ee2..fba6b586e3c8 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -187,7 +187,7 @@ struct thread_struct { */ struct perf_event *last_hit_ubp; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ - struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */ + struct arch_hw_breakpoint hw_brk[HBP_NUM_MAX]; /* hardware breakpoint info */ unsigned long trap_nr; /* last trap # on this thread */ u8 load_slb; /* Ages out SLB preload cache entries */ u8 load_fp; diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index f303aea61794..d94d8925711c 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -711,21 +711,49 @@ void switch_booke_debug_regs(struct debug_reg *new_debug) EXPORT_SYMBOL_GPL(switch_booke_debug_regs); #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ #ifndef CONFIG_HAVE_HW_BREAKPOINT -static void set_breakpoint(struct arch_hw_breakpoint *brk) +static void set_breakpoint(int i, struct arch_hw_breakpoint *brk) { preempt_disable(); - __set_breakpoint(0, brk); + __set_breakpoint(i, brk); preempt_enable(); } static void set_debug_reg_defaults(struct thread_struct *thread) { - thread->hw_brk.address = 0; - thread->hw_brk.type = 0; - thread->hw_brk.len = 0; - thread->hw_brk.hw_len = 0; - if (ppc_breakpoint_available()) - set_breakpoint(&thread->hw_brk); + int i; + struct arch_hw_breakpoint null_brk = {0}; + + for (i = 0; i < nr_wp_slots(); i++) { + thread->hw_brk[i] = null_brk; + if 
(ppc_breakpoint_available()) + set_breakpoint(i, &thread->hw_brk[i]); + } +} + +static inline bool hw_brk_match(struct arch_hw_breakpoint *a, + struct arch_hw_breakpoint *b) +{ + if (a->address != b->address) + return false; + if (a->type != b->type) + return false; + if (a->len != b->len) + return false; + /* no need to check hw_len. it's calculated from address and len */ + return true; +} + +static void switch_hw_breakpoint(struct task_struct *new) +{ + int i; + + for (i = 0; i < nr_wp_slots(); i++) { + if (likely(hw_brk_match(this_cpu_ptr(¤t_brk[i]), + &new->thread.hw_brk[i]))) + continue; + + __set_breakpoint(i, &new->thread.hw_brk[i]); + } } #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ @@ -829,19 +857,6 @@ bool ppc_breakpoint_available(void) } EXPORT_SYMBOL_GPL(ppc_breakpoint_available); -static inline bool hw_brk_match(struct arch_hw_breakpoint *a, - struct arch_hw_breakpoint *b) -{ - if (a->address != b->address) - return false; - if (a->type != b->type) - return false; - if (a->len != b->len) - return false; - /* no need to check hw_len. it's calculated from address and len */ - return true; -} - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM static inline bool tm_enabled(struct task_struct *tsk) @@ -1174,8 +1189,7 @@ struct task_struct *__switch_to(struct task_struct *prev, * schedule DABR */ #ifndef CONFIG_HAVE_HW_BREAKPOINT - if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk[0]), &new->thread.hw_brk))) - __set_breakpoint(0, &new->thread.hw_brk); + switch_hw_breakpoint(new); #endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c index 12962302d6a4..0dbb35392dd2 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c +++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c @@ -67,11 +67,16 @@ int ptrace_get_debugreg(struct task_struct *child, unsigned long addr, /* We only support one DABR and no IABRS at the moment */ if (addr > 0) return -EINVAL; - dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) | - (child->thread.hw_brk.type & HW_BRK_TYPE_DABR)); + dabr_fake = ((child->thread.hw_brk[0].address & (~HW_BRK_TYPE_DABR)) | + (child->thread.hw_brk[0].type & HW_BRK_TYPE_DABR)); return put_user(dabr_fake, datalp); } +/* + * ptrace_set_debugreg() fakes DABR and DABR is only one. So even if + * internal hw supports more than one watchpoint, we support only one + * watchpoint with this interface. 
+ */ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data) { #ifdef CONFIG_HAVE_HW_BREAKPOINT @@ -137,7 +142,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned l return ret; thread->ptrace_bps[0] = bp; - thread->hw_brk = hw_brk; + thread->hw_brk[0] = hw_brk; return 0; } @@ -159,12 +164,24 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned l if (set_bp && (!ppc_breakpoint_available())) return -ENODEV; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ - task->thread.hw_brk = hw_brk; + task->thread.hw_brk[0] = hw_brk; return 0; } +static int find_empty_hw_brk(struct thread_struct *thread) +{ + int i; + + for (i = 0; i < nr_wp_slots(); i++) { + if (!thread->hw_brk[i].address) + return i; + } + return -1; +} + long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { + int i; #ifdef CONFIG_HAVE_HW_BREAKPOINT int len = 0; struct thread_struct *thread = &child->thread; @@ -223,15 +240,16 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) return -EINVAL; - if (child->thread.hw_brk.address) + i = find_empty_hw_brk(&child->thread); + if (i < 0) return -ENOSPC; if (!ppc_breakpoint_available()) return -ENODEV; - child->thread.hw_brk = brk; + child->thread.hw_brk[i] = brk; - return 1; + return i + 1; } long ppc_del_hwdebug(struct task_struct *child, long data) @@ -241,7 +259,7 @@ long ppc_del_hwdebug(struct task_struct *child, long data) struct thread_struct *thread = &child->thread; struct perf_event *bp; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ - if (data != 1) + if (data < 1 || data > nr_wp_slots()) return -EINVAL; #ifdef CONFIG_HAVE_HW_BREAKPOINT @@ -254,11 +272,11 @@ long ppc_del_hwdebug(struct task_struct *child, long data) } return ret; #else /* CONFIG_HAVE_HW_BREAKPOINT */ - if (child->thread.hw_brk.address == 0) + if (child->thread.hw_brk[data - 1].address == 0) return -ENOENT; - child->thread.hw_brk.address = 0; - child->thread.hw_brk.type = 0; + child->thread.hw_brk[data - 1].address = 0; + child->thread.hw_brk[data - 1].type = 0; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ return 0; diff --git a/arch/powerpc/kernel/ptrace/ptrace32.c b/arch/powerpc/kernel/ptrace/ptrace32.c index 7976ddf29c0e..7589a9665ffb 100644 --- a/arch/powerpc/kernel/ptrace/ptrace32.c +++ b/arch/powerpc/kernel/ptrace/ptrace32.c @@ -259,8 +259,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, ret = put_user(child->thread.debug.dac1, (u32 __user *)data); #else dabr_fake = ( - (child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) | - (child->thread.hw_brk.type & HW_BRK_TYPE_DABR)); + (child->thread.hw_brk[0].address & (~HW_BRK_TYPE_DABR)) | + (child->thread.hw_brk[0].type & HW_BRK_TYPE_DABR)); ret = put_user(dabr_fake, (u32 __user *)data); #endif break; diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 8e29138a344a..b4143b6ff093 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -265,15 +265,20 @@ static void do_signal(struct task_struct *tsk) return; /* no signals delivered */ } -#ifndef CONFIG_PPC_ADV_DEBUG_REGS /* * Reenable the DABR before delivering the signal to * user space. The DABR will have been cleared if it * triggered inside the kernel. 
*/ - if (tsk->thread.hw_brk.address && tsk->thread.hw_brk.type) - __set_breakpoint(0, &tsk->thread.hw_brk); -#endif + if (!IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) { + int i; + + for (i = 0; i < nr_wp_slots(); i++) { + if (tsk->thread.hw_brk[i].address && tsk->thread.hw_brk[i].type) + __set_breakpoint(i, &tsk->thread.hw_brk[i]); + } + } + /* Re-enable the breakpoints for the signal stack */ thread_change_pc(tsk, tsk->thread.regs); -- cgit v1.2.3-59-g8ed1b From 6b424efa119d5ea06b15ff240dddc3b4b9f9cdfb Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:35 +0530 Subject: powerpc/watchpoint: Use loop for thread_struct->ptrace_bps ptrace_bps is already an array of size HBP_NUM_MAX, but we use hardcoded index 0 while fetching/updating it. Convert such code to loop over the array. The ptrace interface for using multiple watchpoints remains the same, e.g. two PPC_PTRACE_SETHWDEBUG calls will create two watchpoints if the underlying hardware supports it. Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-11-ravi.bangoria@linux.ibm.com --- arch/powerpc/kernel/hw_breakpoint.c | 7 +++++-- arch/powerpc/kernel/process.c | 6 +++++- arch/powerpc/kernel/ptrace/ptrace-noadv.c | 28 +++++++++++++++++++++------- 3 files changed, 31 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index f5472402c06d..917cca73dbc3 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -420,10 +420,13 @@ NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify); */ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { + int i; struct thread_struct *t = &tsk->thread; - unregister_hw_breakpoint(t->ptrace_bps[0]); - t->ptrace_bps[0] = NULL; + for (i = 0; i < nr_wp_slots(); i++) { + unregister_hw_breakpoint(t->ptrace_bps[i]); + t->ptrace_bps[i] = NULL; + } } void hw_breakpoint_pmu_read(struct perf_event *bp) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index d94d8925711c..77ec1299e2fd 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1604,6 +1604,9 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp, void (*f)(void); unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; struct thread_info *ti = task_thread_info(p); +#ifdef CONFIG_HAVE_HW_BREAKPOINT + int i; +#endif klp_init_thread_info(p); @@ -1663,7 +1666,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp, p->thread.ksp_limit = (unsigned long)end_of_stack(p); #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT - p->thread.ptrace_bps[0] = NULL; + for (i = 0; i < nr_wp_slots(); i++) + p->thread.ptrace_bps[i] = NULL; #endif p->thread.fp_save_area = NULL; diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c index 0dbb35392dd2..08cb8c1b504c 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c +++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c @@ -168,6 +168,19 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned l return 0; } +#ifdef CONFIG_HAVE_HW_BREAKPOINT +static int find_empty_ptrace_bp(struct thread_struct *thread) +{ + int i; + + for (i = 0; i < nr_wp_slots(); i++) { + if (!thread->ptrace_bps[i]) + return i; + } + return -1; +} +#endif + static int find_empty_hw_brk(struct thread_struct *thread) { int i; @@ -217,8 +230,9 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf len = 1; else return
-EINVAL; - bp = thread->ptrace_bps[0]; - if (bp) + + i = find_empty_ptrace_bp(thread); + if (i < 0) return -ENOSPC; /* Create a new breakpoint request if one doesn't exist already */ @@ -228,13 +242,13 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf arch_bp_generic_fields(brk.type, &attr.bp_type); bp = register_user_hw_breakpoint(&attr, ptrace_triggered, NULL, child); - thread->ptrace_bps[0] = bp; + thread->ptrace_bps[i] = bp; if (IS_ERR(bp)) { - thread->ptrace_bps[0] = NULL; + thread->ptrace_bps[i] = NULL; return PTR_ERR(bp); } - return 1; + return i + 1; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) return -EINVAL; @@ -263,10 +277,10 @@ long ppc_del_hwdebug(struct task_struct *child, long data) return -EINVAL; #ifdef CONFIG_HAVE_HW_BREAKPOINT - bp = thread->ptrace_bps[0]; + bp = thread->ptrace_bps[data - 1]; if (bp) { unregister_hw_breakpoint(bp); - thread->ptrace_bps[0] = NULL; + thread->ptrace_bps[data - 1] = NULL; } else { ret = -ENOENT; } -- cgit v1.2.3-59-g8ed1b From c9e82aeb197df2d93b1b4234bc0c80943fa594e8 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:36 +0530 Subject: powerpc/watchpoint: Introduce is_ptrace_bp() function Introduce an is_ptrace_bp() function and move the check inside the function. It will be utilized more in a later set of patches. Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-12-ravi.bangoria@linux.ibm.com --- arch/powerpc/kernel/hw_breakpoint.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 917cca73dbc3..8028a2704874 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -91,6 +91,11 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) hw_breakpoint_disable(); } +static bool is_ptrace_bp(struct perf_event *bp) +{ + return bp->overflow_handler == ptrace_triggered; +} + /* * Perform cleanup of arch-specific counters during unregistration * of the perf-event @@ -325,7 +330,7 @@ int hw_breakpoint_handler(struct die_args *args) * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal * generated in do_dabr(). */ - if (bp->overflow_handler == ptrace_triggered) { + if (is_ptrace_bp(bp)) { perf_bp_event(bp, regs); rc = NOTIFY_DONE; goto out; -- cgit v1.2.3-59-g8ed1b From e68ef121c1f4c38edf87a3354661ceb99d522729 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:37 +0530 Subject: powerpc/watchpoint: Use builtin ALIGN*() macros Currently we calculate hw-aligned start and end addresses manually. Replace them with the builtin ALIGN_DOWN() and ALIGN() macros. So far end_addr was inclusive, but this patch makes it exclusive (by avoiding the -1) for better readability.
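Worked example (editorial note, not part of the patch): for a request with address 0x1005 and len 6, with HW_BREAKPOINT_SIZE of 8, both schemes describe the same hardware range; only the end representation changes.

	/* Sketch only: old inclusive end vs. new exclusive end. */
	unsigned long addr = 0x1005, len = 6;

	/* old: start = addr & ~7 = 0x1000; end = (addr + len - 1) | 7 = 0x100f (inclusive) */
	/* new, with the builtin macros: */
	unsigned long start_addr = ALIGN_DOWN(addr, 8);		/* 0x1000 */
	unsigned long end_addr   = ALIGN(addr + len, 8);	/* 0x1010, exclusive */
	unsigned long hw_len     = end_addr - start_addr;	/* 0x10, no +1 needed */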
Suggested-by: Christophe Leroy Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-13-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/hw_breakpoint.h | 5 +++-- arch/powerpc/kernel/hw_breakpoint.c | 12 ++++++------ arch/powerpc/kernel/process.c | 8 ++++---- arch/powerpc/kernel/ptrace/ptrace-noadv.c | 2 +- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index d472b2eb757e..add5aa076919 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -34,10 +34,11 @@ struct arch_hw_breakpoint { #define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \ HW_BRK_TYPE_HYP) +/* Minimum granularity */ #ifdef CONFIG_PPC_8xx -#define HW_BREAKPOINT_ALIGN 0x3 +#define HW_BREAKPOINT_SIZE 0x4 #else -#define HW_BREAKPOINT_ALIGN 0x7 +#define HW_BREAKPOINT_SIZE 0x8 #endif #define DABR_MAX_LEN 8 diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 8028a2704874..4366bd0c90c4 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -146,10 +146,10 @@ int arch_bp_generic_fields(int type, int *gen_bp_type) * <---8 bytes---> * * In this case, we should configure hw as: - * start_addr = address & ~HW_BREAKPOINT_ALIGN + * start_addr = address & ~(HW_BREAKPOINT_SIZE - 1) * len = 16 bytes * - * @start_addr and @end_addr are inclusive. + * @start_addr is inclusive but @end_addr is exclusive. */ static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw) { @@ -157,14 +157,14 @@ static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw) u16 hw_len; unsigned long start_addr, end_addr; - start_addr = hw->address & ~HW_BREAKPOINT_ALIGN; - end_addr = (hw->address + hw->len - 1) | HW_BREAKPOINT_ALIGN; - hw_len = end_addr - start_addr + 1; + start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE); + end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE); + hw_len = end_addr - start_addr; if (dawr_enabled()) { max_len = DAWR_MAX_LEN; /* DAWR region can't cross 512 bytes boundary */ - if ((start_addr >> 9) != (end_addr >> 9)) + if (ALIGN(start_addr, SZ_512M) != ALIGN(end_addr - 1, SZ_512M)) return -EINVAL; } else if (IS_ENABLED(CONFIG_PPC_8xx)) { /* 8xx can setup a range without limitation */ diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 77ec1299e2fd..9b11575dcb8a 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -800,12 +800,12 @@ static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk) unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW | LCTRL1_CRWF_RW; unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN; - unsigned long start_addr = brk->address & ~HW_BREAKPOINT_ALIGN; - unsigned long end_addr = (brk->address + brk->len - 1) | HW_BREAKPOINT_ALIGN; + unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE); + unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE); if (start_addr == 0) lctrl2 |= LCTRL2_LW0LA_F; - else if (end_addr == ~0U) + else if (end_addr == 0) lctrl2 |= LCTRL2_LW0LA_E; else lctrl2 |= LCTRL2_LW0LA_EandF; @@ -821,7 +821,7 @@ static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk) lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO; mtspr(SPRN_CMPE, start_addr - 1); - mtspr(SPRN_CMPF, end_addr + 1); + 
mtspr(SPRN_CMPF, end_addr); mtspr(SPRN_LCTRL1, lctrl1); mtspr(SPRN_LCTRL2, lctrl2); diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c index 08cb8c1b504c..697c7e4b5877 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c +++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c @@ -216,7 +216,7 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf if ((unsigned long)bp_info->addr >= TASK_SIZE) return -EIO; - brk.address = bp_info->addr & ~HW_BREAKPOINT_ALIGN; + brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE); brk.type = HW_BRK_TYPE_TRANSLATE; brk.len = DABR_MAX_LEN; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) -- cgit v1.2.3-59-g8ed1b From 74c6881019b7d56c327fffc268d97adb5eb1b4f9 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:38 +0530 Subject: powerpc/watchpoint: Prepare handler to handle more than one watchpoint Currently we assume that we have only one watchpoint supported by hw. Get rid of that assumption and use a dynamic loop instead. This should make supporting more watchpoints very easy. With more than one watchpoint, the exception handler needs to know which DAWR caused the exception, and the hardware currently does not provide it. So we need software logic for that. To figure out which DAWR caused the exception, check all different combinations of the user-specified range, the DAWR address range, the actual access range and the DAWRX constraints. For example, if the user-specified range and the actual access range overlap but DAWRX is configured for a read-only watchpoint and the instruction is a store, this DAWR must not have caused the exception. Signed-off-by: Ravi Bangoria Reviewed-by: Michael Neuling [mpe: Unsplit multi-line printk() strings, fix some sparse warnings] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200514111741.97993-14-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/processor.h | 2 +- arch/powerpc/include/asm/sstep.h | 2 + arch/powerpc/kernel/hw_breakpoint.c | 395 +++++++++++++++++++++++++++-------- arch/powerpc/kernel/process.c | 3 - 4 files changed, 310 insertions(+), 92 deletions(-) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index fba6b586e3c8..4e53df163b92 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -185,7 +185,7 @@ struct thread_struct { * Helps identify source of single-step exception and subsequent * hw-breakpoint enablement */ - struct perf_event *last_hit_ubp; + struct perf_event *last_hit_ubp[HBP_NUM_MAX]; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ struct arch_hw_breakpoint hw_brk[HBP_NUM_MAX]; /* hardware breakpoint info */ unsigned long trap_nr; /* last trap # on this thread */ diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h index 9b200a5f8794..3b01c69a44aa 100644 --- a/arch/powerpc/include/asm/sstep.h +++ b/arch/powerpc/include/asm/sstep.h @@ -49,6 +49,8 @@ enum instruction_type { #define INSTR_TYPE_MASK 0x1f +#define OP_IS_LOAD(type) ((LOAD <= (type) && (type) <= LOAD_VSX) || (type) == LARX) +#define OP_IS_STORE(type) ((STORE <= (type) && (type) <= STORE_VSX) || (type) == STCX) #define OP_IS_LOAD_STORE(type) (LOAD <= (type) && (type) <= STCX) /* Compute flags, ORed in with type */ diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 4366bd0c90c4..5fdc6c4f14af 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -31,7 +31,7 @@ * Stores the breakpoints
currently in use on each breakpoint address * register for every cpu */ -static DEFINE_PER_CPU(struct perf_event *, bp_per_reg); +static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]); /* * Returns total number of data or instruction breakpoints available. @@ -43,6 +43,17 @@ int hw_breakpoint_slots(int type) return 0; /* no instruction breakpoints available */ } +static bool single_step_pending(void) +{ + int i; + + for (i = 0; i < nr_wp_slots(); i++) { + if (current->thread.last_hit_ubp[i]) + return true; + } + return false; +} + /* * Install a perf counter breakpoint. * @@ -55,16 +66,26 @@ int hw_breakpoint_slots(int type) int arch_install_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); - struct perf_event **slot = this_cpu_ptr(&bp_per_reg); + struct perf_event **slot; + int i; + + for (i = 0; i < nr_wp_slots(); i++) { + slot = this_cpu_ptr(&bp_per_reg[i]); + if (!*slot) { + *slot = bp; + break; + } + } - *slot = bp; + if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot")) + return -EBUSY; /* * Do not install DABR values if the instruction must be single-stepped. * If so, DABR will be populated in single_step_dabr_instruction(). */ - if (current->thread.last_hit_ubp != bp) - __set_breakpoint(0, info); + if (!single_step_pending()) + __set_breakpoint(i, info); return 0; } @@ -80,15 +101,22 @@ int arch_install_hw_breakpoint(struct perf_event *bp) */ void arch_uninstall_hw_breakpoint(struct perf_event *bp) { - struct perf_event **slot = this_cpu_ptr(&bp_per_reg); + struct arch_hw_breakpoint null_brk = {0}; + struct perf_event **slot; + int i; - if (*slot != bp) { - WARN_ONCE(1, "Can't find the breakpoint"); - return; + for (i = 0; i < nr_wp_slots(); i++) { + slot = this_cpu_ptr(&bp_per_reg[i]); + if (*slot == bp) { + *slot = NULL; + break; + } } - *slot = NULL; - hw_breakpoint_disable(); + if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot")) + return; + + __set_breakpoint(i, &null_brk); } static bool is_ptrace_bp(struct perf_event *bp) @@ -108,8 +136,14 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp) * restoration variables to prevent dangling pointers. * FIXME, this should not be using bp->ctx at all! Sayeth peterz. 
*/ - if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) - bp->ctx->task->thread.last_hit_ubp = NULL; + if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) { + int i; + + for (i = 0; i < nr_wp_slots(); i++) { + if (bp->ctx->task->thread.last_hit_ubp[i] == bp) + bp->ctx->task->thread.last_hit_ubp[i] = NULL; + } + } } /* @@ -221,90 +255,209 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs) { struct arch_hw_breakpoint *info; + int i; - if (likely(!tsk->thread.last_hit_ubp)) - return; + for (i = 0; i < nr_wp_slots(); i++) { + if (unlikely(tsk->thread.last_hit_ubp[i])) + goto reset; + } + return; - info = counter_arch_bp(tsk->thread.last_hit_ubp); +reset: regs->msr &= ~MSR_SE; - __set_breakpoint(0, info); - tsk->thread.last_hit_ubp = NULL; + for (i = 0; i < nr_wp_slots(); i++) { + info = counter_arch_bp(__this_cpu_read(bp_per_reg[i])); + __set_breakpoint(i, info); + tsk->thread.last_hit_ubp[i] = NULL; + } } -static bool dar_within_range(unsigned long dar, struct arch_hw_breakpoint *info) +static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info) { return ((info->address <= dar) && (dar - info->address < info->len)); } -static bool -dar_range_overlaps(unsigned long dar, int size, struct arch_hw_breakpoint *info) +static bool dar_user_range_overlaps(unsigned long dar, int size, + struct arch_hw_breakpoint *info) +{ + return ((dar < info->address + info->len) && + (dar + size > info->address)); +} + +static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info) +{ + unsigned long hw_start_addr, hw_end_addr; + + hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE); + hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE); + + return ((hw_start_addr <= dar) && (hw_end_addr > dar)); +} + +static bool dar_hw_range_overlaps(unsigned long dar, int size, + struct arch_hw_breakpoint *info) { - return ((dar <= info->address + info->len - 1) && - (dar + size - 1 >= info->address)); + unsigned long hw_start_addr, hw_end_addr; + + hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE); + hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE); + + return ((dar < hw_end_addr) && (dar + size > hw_start_addr)); } /* - * Handle debug exception notifications. + * If hw has multiple DAWR registers, we also need to check all + * dawrx constraint bits to confirm this is _really_ a valid event. */ -static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp, - struct arch_hw_breakpoint *info) +static bool check_dawrx_constraints(struct pt_regs *regs, int type, + struct arch_hw_breakpoint *info) { - struct ppc_inst instr = ppc_inst(0); - int ret, type, size; - struct instruction_op op; - unsigned long addr = info->address; + if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ)) + return false; - if (__get_user_instr_inatomic(instr, (void __user *)regs->nip)) - goto fail; + if (OP_IS_STORE(type) && !(info->type & HW_BRK_TYPE_WRITE)) + return false; - ret = analyse_instr(&op, regs, instr); - type = GETTYPE(op.type); - size = GETSIZE(op.type); + if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL)) + return false; - if (!ret && (type == LARX || type == STCX)) { - printk_ratelimited("Breakpoint hit on instruction that can't be emulated." 
- " Breakpoint at 0x%lx will be disabled.\n", addr); - goto disable; - } + if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER)) + return false; + + return true; +} + +/* + * Return true if the event is valid wrt dawr configuration, + * including extraneous exception. Otherwise return false. + */ +static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr, + int type, int size, struct arch_hw_breakpoint *info) +{ + bool in_user_range = dar_in_user_range(regs->dar, info); + bool dawrx_constraints; /* - * If it's extraneous event, we still need to emulate/single- - * step the instruction, but we don't generate an event. + * 8xx supports only one breakpoint and thus we can + * unconditionally return true. */ - if (size && !dar_range_overlaps(regs->dar, size, info)) - info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; + if (IS_ENABLED(CONFIG_PPC_8xx)) { + if (!in_user_range) + info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; + return true; + } - /* Do not emulate user-space instructions, instead single-step them */ - if (user_mode(regs)) { - current->thread.last_hit_ubp = bp; - regs->msr |= MSR_SE; + if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) { + if (in_user_range) + return true; + + if (dar_in_hw_range(regs->dar, info)) { + info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; + return true; + } return false; } - if (!emulate_step(regs, instr)) - goto fail; + dawrx_constraints = check_dawrx_constraints(regs, type, info); - return true; + if (dar_user_range_overlaps(regs->dar, size, info)) + return dawrx_constraints; + + if (dar_hw_range_overlaps(regs->dar, size, info)) { + if (dawrx_constraints) { + info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; + return true; + } + } + return false; +} + +static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, + int *type, int *size, bool *larx_stcx) +{ + struct instruction_op op; + + if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip)) + return; + + analyse_instr(&op, regs, *instr); -fail: /* - * We've failed in reliably handling the hw-breakpoint. Unregister - * it and throw a warning message to let the user know about it. + * Set size = 8 if analyse_instr() fails. If it's a userspace + * watchpoint(valid or extraneous), we can notify user about it. + * If it's a kernel watchpoint, instruction emulation will fail + * in stepping_handler() and watchpoint will be disabled. */ - WARN(1, "Unable to handle hardware breakpoint. Breakpoint at " - "0x%lx will be disabled.", addr); + *type = GETTYPE(op.type); + *size = !(*type == UNKNOWN) ? GETSIZE(op.type) : 8; + *larx_stcx = (*type == LARX || *type == STCX); +} + +/* + * We've failed in reliably handling the hw-breakpoint. Unregister + * it and throw a warning message to let the user know about it. + */ +static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info) +{ + WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.", + info->address); + perf_event_disable_inatomic(bp); +} -disable: +static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info) +{ + printk_ratelimited("Breakpoint hit on instruction that can't be emulated. 
Breakpoint at 0x%lx will be disabled.\n", + info->address); perf_event_disable_inatomic(bp); - return false; +} + +static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp, + struct arch_hw_breakpoint **info, int *hit, + struct ppc_inst instr) +{ + int i; + int stepped; + + /* Do not emulate user-space instructions, instead single-step them */ + if (user_mode(regs)) { + for (i = 0; i < nr_wp_slots(); i++) { + if (!hit[i]) + continue; + current->thread.last_hit_ubp[i] = bp[i]; + info[i] = NULL; + } + regs->msr |= MSR_SE; + return false; + } + + stepped = emulate_step(regs, instr); + if (!stepped) { + for (i = 0; i < nr_wp_slots(); i++) { + if (!hit[i]) + continue; + handler_error(bp[i], info[i]); + info[i] = NULL; + } + return false; + } + return true; } int hw_breakpoint_handler(struct die_args *args) { + bool err = false; int rc = NOTIFY_STOP; - struct perf_event *bp; + struct perf_event *bp[HBP_NUM_MAX] = { NULL }; struct pt_regs *regs = args->regs; - struct arch_hw_breakpoint *info; + struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL }; + int i; + int hit[HBP_NUM_MAX] = {0}; + int nr_hit = 0; + bool ptrace_bp = false; + struct ppc_inst instr = ppc_inst(0); + int type = 0; + int size = 0; + bool larx_stcx = false; /* Disable breakpoints during exception handling */ hw_breakpoint_disable(); @@ -317,12 +470,40 @@ int hw_breakpoint_handler(struct die_args *args) */ rcu_read_lock(); - bp = __this_cpu_read(bp_per_reg); - if (!bp) { + if (!IS_ENABLED(CONFIG_PPC_8xx)) + get_instr_detail(regs, &instr, &type, &size, &larx_stcx); + + for (i = 0; i < nr_wp_slots(); i++) { + bp[i] = __this_cpu_read(bp_per_reg[i]); + if (!bp[i]) + continue; + + info[i] = counter_arch_bp(bp[i]); + info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ; + + if (check_constraints(regs, instr, type, size, info[i])) { + if (!IS_ENABLED(CONFIG_PPC_8xx) && + ppc_inst_equal(instr, ppc_inst(0))) { + handler_error(bp[i], info[i]); + info[i] = NULL; + err = 1; + continue; + } + + if (is_ptrace_bp(bp[i])) + ptrace_bp = true; + hit[i] = 1; + nr_hit++; + } + } + + if (err) + goto reset; + + if (!nr_hit) { rc = NOTIFY_DONE; goto out; } - info = counter_arch_bp(bp); /* * Return early after invoking user-callback function without restoring @@ -330,29 +511,50 @@ int hw_breakpoint_handler(struct die_args *args) * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal * generated in do_dabr(). 
*/ - if (is_ptrace_bp(bp)) { - perf_bp_event(bp, regs); + if (ptrace_bp) { + for (i = 0; i < nr_wp_slots(); i++) { + if (!hit[i]) + continue; + perf_bp_event(bp[i], regs); + info[i] = NULL; + } rc = NOTIFY_DONE; - goto out; + goto reset; } - info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ; - if (IS_ENABLED(CONFIG_PPC_8xx)) { - if (!dar_within_range(regs->dar, info)) - info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; - } else { - if (!stepping_handler(regs, bp, info)) - goto out; + if (!IS_ENABLED(CONFIG_PPC_8xx)) { + if (larx_stcx) { + for (i = 0; i < nr_wp_slots(); i++) { + if (!hit[i]) + continue; + larx_stcx_err(bp[i], info[i]); + info[i] = NULL; + } + goto reset; + } + + if (!stepping_handler(regs, bp, info, hit, instr)) + goto reset; } /* * As a policy, the callback is invoked in a 'trigger-after-execute' * fashion */ - if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) - perf_bp_event(bp, regs); + for (i = 0; i < nr_wp_slots(); i++) { + if (!hit[i]) + continue; + if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) + perf_bp_event(bp[i], regs); + } + +reset: + for (i = 0; i < nr_wp_slots(); i++) { + if (!info[i]) + continue; + __set_breakpoint(i, info[i]); + } - __set_breakpoint(0, info); out: rcu_read_unlock(); return rc; @@ -367,26 +569,43 @@ static int single_step_dabr_instruction(struct die_args *args) struct pt_regs *regs = args->regs; struct perf_event *bp = NULL; struct arch_hw_breakpoint *info; + int i; + bool found = false; - bp = current->thread.last_hit_ubp; /* * Check if we are single-stepping as a result of a * previous HW Breakpoint exception */ - if (!bp) - return NOTIFY_DONE; + for (i = 0; i < nr_wp_slots(); i++) { + bp = current->thread.last_hit_ubp[i]; + + if (!bp) + continue; + + found = true; + info = counter_arch_bp(bp); + + /* + * We shall invoke the user-defined callback function in the + * single stepping handler to confirm to 'trigger-after-execute' + * semantics + */ + if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) + perf_bp_event(bp, regs); + current->thread.last_hit_ubp[i] = NULL; + } - info = counter_arch_bp(bp); + if (!found) + return NOTIFY_DONE; - /* - * We shall invoke the user-defined callback function in the single - * stepping handler to confirm to 'trigger-after-execute' semantics - */ - if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) - perf_bp_event(bp, regs); + for (i = 0; i < nr_wp_slots(); i++) { + bp = __this_cpu_read(bp_per_reg[i]); + if (!bp) + continue; - __set_breakpoint(0, info); - current->thread.last_hit_ubp = NULL; + info = counter_arch_bp(bp); + __set_breakpoint(i, info); + } /* * If the process was being single-stepped by ptrace, let the diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 9b11575dcb8a..048d64c4e115 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -629,9 +629,6 @@ void do_break (struct pt_regs *regs, unsigned long address, if (debugger_break_match(regs)) return; - /* Clear the breakpoint */ - hw_breakpoint_disable(); - /* Deliver the signal to userspace */ force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address); } -- cgit v1.2.3-59-g8ed1b From 29da4f91c0c1fbda12b8a31be0d564930208c92e Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:39 +0530 Subject: powerpc/watchpoint: Don't allow concurrent perf and ptrace events With Book3s DAWR, ptrace and perf watchpoints on powerpc behave differently. A ptrace watchpoint works in one-shot mode and generates a signal before executing the instruction.
It's the ptrace user's job to single-step the instruction and re-enable the watchpoint. OTOH, in the case of a perf watchpoint, the kernel emulates/single-steps the instruction and then generates the event. If perf and ptrace create two events with the same or overlapping address ranges, it's ambiguous to decide who should single-step the instruction. Because of this issue, don't allow perf and ptrace watchpoints at the same time if their address ranges overlap. Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-15-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/hw_breakpoint.h | 2 + arch/powerpc/kernel/hw_breakpoint.c | 221 +++++++++++++++++++++++++++++++ kernel/events/hw_breakpoint.c | 16 +++ 3 files changed, 239 insertions(+) diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index add5aa076919..f42a55eb77d2 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -70,6 +70,8 @@ extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); int arch_install_hw_breakpoint(struct perf_event *bp); void arch_uninstall_hw_breakpoint(struct perf_event *bp); +int arch_reserve_bp_slot(struct perf_event *bp); +void arch_release_bp_slot(struct perf_event *bp); void arch_unregister_hw_breakpoint(struct perf_event *bp); void hw_breakpoint_pmu_read(struct perf_event *bp); extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 5fdc6c4f14af..0000daf0e1da 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -124,6 +124,227 @@ static bool is_ptrace_bp(struct perf_event *bp) return bp->overflow_handler == ptrace_triggered; } +struct breakpoint { + struct list_head list; + struct perf_event *bp; + bool ptrace_bp; +}; + +static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]); +static LIST_HEAD(task_bps); + +static struct breakpoint *alloc_breakpoint(struct perf_event *bp) +{ + struct breakpoint *tmp; + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return ERR_PTR(-ENOMEM); + tmp->bp = bp; + tmp->ptrace_bp = is_ptrace_bp(bp); + return tmp; +} + +static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2) +{ + __u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr; + + bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE); + bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE); + bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE); + bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE); + + return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr); +} + +static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp) +{ + return is_ptrace_bp(bp) ?
!b->ptrace_bp : b->ptrace_bp; +} + +static bool can_co_exist(struct breakpoint *b, struct perf_event *bp) +{ + return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp)); +} + +static int task_bps_add(struct perf_event *bp) +{ + struct breakpoint *tmp; + + tmp = alloc_breakpoint(bp); + if (IS_ERR(tmp)) + return PTR_ERR(tmp); + + list_add(&tmp->list, &task_bps); + return 0; +} + +static void task_bps_remove(struct perf_event *bp) +{ + struct list_head *pos, *q; + + list_for_each_safe(pos, q, &task_bps) { + struct breakpoint *tmp = list_entry(pos, struct breakpoint, list); + + if (tmp->bp == bp) { + list_del(&tmp->list); + kfree(tmp); + break; + } + } +} + +/* + * If any task has a breakpoint from the alternate infrastructure, + * return true. Otherwise return false. + */ +static bool all_task_bps_check(struct perf_event *bp) +{ + struct breakpoint *tmp; + + list_for_each_entry(tmp, &task_bps, list) { + if (!can_co_exist(tmp, bp)) + return true; + } + return false; +} + +/* + * If the same task has a breakpoint from the alternate infrastructure, + * return true. Otherwise return false. + */ +static bool same_task_bps_check(struct perf_event *bp) +{ + struct breakpoint *tmp; + + list_for_each_entry(tmp, &task_bps, list) { + if (tmp->bp->hw.target == bp->hw.target && + !can_co_exist(tmp, bp)) + return true; + } + return false; +} + +static int cpu_bps_add(struct perf_event *bp) +{ + struct breakpoint **cpu_bp; + struct breakpoint *tmp; + int i = 0; + + tmp = alloc_breakpoint(bp); + if (IS_ERR(tmp)) + return PTR_ERR(tmp); + + cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu); + for (i = 0; i < nr_wp_slots(); i++) { + if (!cpu_bp[i]) { + cpu_bp[i] = tmp; + break; + } + } + return 0; +} + +static void cpu_bps_remove(struct perf_event *bp) +{ + struct breakpoint **cpu_bp; + int i = 0; + + cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu); + for (i = 0; i < nr_wp_slots(); i++) { + if (!cpu_bp[i]) + continue; + + if (cpu_bp[i]->bp == bp) { + kfree(cpu_bp[i]); + cpu_bp[i] = NULL; + break; + } + } +} + +static bool cpu_bps_check(int cpu, struct perf_event *bp) +{ + struct breakpoint **cpu_bp; + int i; + + cpu_bp = per_cpu_ptr(cpu_bps, cpu); + for (i = 0; i < nr_wp_slots(); i++) { + if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) + return true; + } + return false; +} + +static bool all_cpu_bps_check(struct perf_event *bp) +{ + int cpu; + + for_each_online_cpu(cpu) { + if (cpu_bps_check(cpu, bp)) + return true; + } + return false; +} + +/* + * We don't use any locks to serialize accesses to cpu_bps or task_bps + * because we are already inside nr_bp_mutex.
+ */ +int arch_reserve_bp_slot(struct perf_event *bp) +{ + int ret; + + /* ptrace breakpoint */ + if (is_ptrace_bp(bp)) { + if (all_cpu_bps_check(bp)) + return -ENOSPC; + + if (same_task_bps_check(bp)) + return -ENOSPC; + + return task_bps_add(bp); + } + + /* perf breakpoint */ + if (is_kernel_addr(bp->attr.bp_addr)) + return 0; + + if (bp->hw.target && bp->cpu == -1) { + if (same_task_bps_check(bp)) + return -ENOSPC; + + return task_bps_add(bp); + } else if (!bp->hw.target && bp->cpu != -1) { + if (all_task_bps_check(bp)) + return -ENOSPC; + + return cpu_bps_add(bp); + } + + if (same_task_bps_check(bp)) + return -ENOSPC; + + ret = cpu_bps_add(bp); + if (ret) + return ret; + ret = task_bps_add(bp); + if (ret) + cpu_bps_remove(bp); + + return ret; +} + +void arch_release_bp_slot(struct perf_event *bp) +{ + if (!is_kernel_addr(bp->attr.bp_addr)) { + if (bp->hw.target) + task_bps_remove(bp); + if (bp->cpu != -1) + cpu_bps_remove(bp); + } +} + /* * Perform cleanup of arch-specific counters during unregistration * of the perf-event diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 3cc8416ec844..b48d7039a015 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -213,6 +213,15 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, list_del(&bp->hw.bp_list); } +__weak int arch_reserve_bp_slot(struct perf_event *bp) +{ + return 0; +} + +__weak void arch_release_bp_slot(struct perf_event *bp) +{ +} + /* * Function to perform processor-specific cleanup during unregistration */ @@ -270,6 +279,7 @@ static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type) struct bp_busy_slots slots = {0}; enum bp_type_idx type; int weight; + int ret; /* We couldn't initialize breakpoint constraints on boot */ if (!constraints_initialized) @@ -294,6 +304,10 @@ static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type) if (slots.pinned + (!!slots.flexible) > nr_slots[type]) return -ENOSPC; + ret = arch_reserve_bp_slot(bp); + if (ret) + return ret; + toggle_bp_slot(bp, true, type, weight); return 0; @@ -317,6 +331,8 @@ static void __release_bp_slot(struct perf_event *bp, u64 bp_type) enum bp_type_idx type; int weight; + arch_release_bp_slot(bp); + type = find_slot_idx(bp_type); weight = hw_breakpoint_weight(bp); toggle_bp_slot(bp, false, type, weight); -- cgit v1.2.3-59-g8ed1b From 514db915e7b33e7eaf8e40192b93380f79b319b5 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:40 +0530 Subject: powerpc/watchpoint/xmon: Don't allow breakpoint overwriting Xmon allows overwriting breakpoints because only one DAWR is supported. But with multiple DAWRs, overwriting becomes ambiguous or unnecessarily complicated. So let's not allow it.
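For illustration, after this change an attempt to set a second data breakpoint while one is already active is refused instead of silently replacing the first one. A hypothetical xmon session (addresses made up; the error string is the one added by this patch) would look like:

    0:mon> bd c000000001234560
    0:mon> bd c000000001234570
    Couldn't find free breakpoint register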
Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-16-ravi.bangoria@linux.ibm.com --- arch/powerpc/xmon/xmon.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 30b3e3d99c0d..094bf4715f2c 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1399,6 +1399,10 @@ bpt_cmds(void) printf("Hardware data breakpoint not supported on this cpu\n"); break; } + if (dabr.enabled) { + printf("Couldn't find free breakpoint register\n"); + break; + } mode = 7; cmd = inchar(); if (cmd == 'r') -- cgit v1.2.3-59-g8ed1b From 30df74d67d48949da87e3a5b57c381763e8fd526 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 May 2020 16:47:41 +0530 Subject: powerpc/watchpoint/xmon: Support 2nd DAWR Add support for 2nd DAWR in xmon. With this, we can have two simultaneous breakpoints from xmon. Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Reviewed-by: Michael Neuling Link: https://lore.kernel.org/r/20200514111741.97993-17-ravi.bangoria@linux.ibm.com --- arch/powerpc/xmon/xmon.c | 101 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 69 insertions(+), 32 deletions(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 094bf4715f2c..de585204d1d2 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -112,7 +112,7 @@ struct bpt { #define BP_DABR 4 static struct bpt bpts[NBPTS]; -static struct bpt dabr; +static struct bpt dabr[HBP_NUM_MAX]; static struct bpt *iabr; static unsigned bpinstr = 0x7fe00008; /* trap */ @@ -784,10 +784,17 @@ static int xmon_sstep(struct pt_regs *regs) static int xmon_break_match(struct pt_regs *regs) { + int i; + if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) return 0; - if (dabr.enabled == 0) - return 0; + for (i = 0; i < nr_wp_slots(); i++) { + if (dabr[i].enabled) + goto found; + } + return 0; + +found: xmon_core(regs, 0); return 1; } @@ -948,13 +955,16 @@ static void insert_bpts(void) static void insert_cpu_bpts(void) { + int i; struct arch_hw_breakpoint brk; - if (dabr.enabled) { - brk.address = dabr.address; - brk.type = (dabr.enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; - brk.len = DABR_MAX_LEN; - __set_breakpoint(0, &brk); + for (i = 0; i < nr_wp_slots(); i++) { + if (dabr[i].enabled) { + brk.address = dabr[i].address; + brk.type = (dabr[i].enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; + brk.len = 8; + __set_breakpoint(i, &brk); + } } if (iabr) @@ -1366,6 +1376,35 @@ static long check_bp_loc(unsigned long addr) return 1; } +static int find_free_data_bpt(void) +{ + int i; + + for (i = 0; i < nr_wp_slots(); i++) { + if (!dabr[i].enabled) + return i; + } + printf("Couldn't find free breakpoint register\n"); + return -1; +} + +static void print_data_bpts(void) +{ + int i; + + for (i = 0; i < nr_wp_slots(); i++) { + if (!dabr[i].enabled) + continue; + + printf(" data "REG" [", dabr[i].address); + if (dabr[i].enabled & 1) + printf("r"); + if (dabr[i].enabled & 2) + printf("w"); + printf("]\n"); + } +} + static char *breakpoint_help_string = "Breakpoint command usage:\n" "b show breakpoints\n" @@ -1399,10 +1438,9 @@ bpt_cmds(void) printf("Hardware data breakpoint not supported on this cpu\n"); break; } - if (dabr.enabled) { - printf("Couldn't find free breakpoint register\n"); + i = find_free_data_bpt(); + if (i < 0) break; - } mode = 7; cmd = inchar(); if (cmd == 'r') @@ -1411,15 +1449,15 @@ bpt_cmds(void) mode = 6; else 
termch = cmd; - dabr.address = 0; - dabr.enabled = 0; - if (scanhex(&dabr.address)) { - if (!is_kernel_addr(dabr.address)) { + dabr[i].address = 0; + dabr[i].enabled = 0; + if (scanhex(&dabr[i].address)) { + if (!is_kernel_addr(dabr[i].address)) { printf(badaddr); break; } - dabr.address &= ~HW_BRK_TYPE_DABR; - dabr.enabled = mode | BP_DABR; + dabr[i].address &= ~HW_BRK_TYPE_DABR; + dabr[i].enabled = mode | BP_DABR; } force_enable_xmon(); @@ -1458,7 +1496,9 @@ bpt_cmds(void) for (i = 0; i < NBPTS; ++i) bpts[i].enabled = 0; iabr = NULL; - dabr.enabled = 0; + for (i = 0; i < nr_wp_slots(); i++) + dabr[i].enabled = 0; + printf("All breakpoints cleared\n"); break; } @@ -1492,14 +1532,7 @@ bpt_cmds(void) if (xmon_is_ro || !scanhex(&a)) { /* print all breakpoints */ printf(" type address\n"); - if (dabr.enabled) { - printf(" data "REG" [", dabr.address); - if (dabr.enabled & 1) - printf("r"); - if (dabr.enabled & 2) - printf("w"); - printf("]\n"); - } + print_data_bpts(); for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { if (!bp->enabled) continue; @@ -1959,8 +1992,13 @@ static void dump_207_sprs(void) printf("hfscr = %.16lx dhdes = %.16lx rpr = %.16lx\n", mfspr(SPRN_HFSCR), mfspr(SPRN_DHDES), mfspr(SPRN_RPR)); - printf("dawr = %.16lx dawrx = %.16lx ciabr = %.16lx\n", - mfspr(SPRN_DAWR0), mfspr(SPRN_DAWRX0), mfspr(SPRN_CIABR)); + printf("dawr0 = %.16lx dawrx0 = %.16lx\n", + mfspr(SPRN_DAWR0), mfspr(SPRN_DAWRX0)); + if (nr_wp_slots() > 1) { + printf("dawr1 = %.16lx dawrx1 = %.16lx\n", + mfspr(SPRN_DAWR1), mfspr(SPRN_DAWRX1)); + } + printf("ciabr = %.16lx\n", mfspr(SPRN_CIABR)); #endif } @@ -3909,10 +3947,9 @@ static void clear_all_bpt(void) bpts[i].enabled = 0; /* Clear any data or iabr breakpoints */ - if (iabr || dabr.enabled) { - iabr = NULL; - dabr.enabled = 0; - } + iabr = NULL; + for (i = 0; i < nr_wp_slots(); i++) + dabr[i].enabled = 0; } #ifdef CONFIG_DEBUG_FS -- cgit v1.2.3-59-g8ed1b From 8f53f9c0f68ab2168f637494b9e24034899c1310 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 13 May 2020 08:36:16 +0530 Subject: powerpc/book3s64/radix/tlb: Determine hugepage flush correctly With a 64K page size flush with start and end: (start, end) = (721f680d0000, 721f680e0000) results in: (hstart, hend) = (721f68200000, 721f68000000) ie. hstart is above hend, which indicates no huge page flush is needed. However the current logic incorrectly sets hflush = true in this case, because hstart != hend. That causes us to call __tlbie_va_range() passing hstart/hend, to do a huge page flush even though we don't need to. __tlbie_va_range() will skip the actual tlbie operation for start > end. But it will still end up calling fixup_tlbie_va_range() and doing the TLB fixups in there, which is harmless but unnecessary work. 
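For illustration, the reported values are consistent with PMD_SIZE = 0x200000 (2 MiB, so PMD_MASK = ~0x1fffff); this worked example is not part of the patch:

    hstart = (0x721f680d0000 + 0x200000 - 1) & ~0x1fffff = 0x721f68200000
    hend   =  0x721f680e0000              & ~0x1fffff    = 0x721f68000000

So hstart > hend, ie. the range contains no fully aligned huge page, yet the old "hstart != hend" test still set hflush.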
Reported-by: Bharata B Rao Signed-off-by: Aneesh Kumar K.V Reviewed-by: Nicholas Piggin [mpe: Drop else case, hflush is already false, flesh out change log] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200513030616.152288-1-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/radix_tlb.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c index 758ade2c2b6e..b5cc9b23cf02 100644 --- a/arch/powerpc/mm/book3s64/radix_tlb.c +++ b/arch/powerpc/mm/book3s64/radix_tlb.c @@ -884,9 +884,7 @@ is_local: if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { hstart = (start + PMD_SIZE - 1) & PMD_MASK; hend = end & PMD_MASK; - if (hstart == hend) - hflush = false; - else + if (hstart < hend) hflush = true; } -- cgit v1.2.3-59-g8ed1b From aa3bc365ee73765af5059678bf55b0f3e4a3e6c4 Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Sat, 16 May 2020 09:20:46 -0700 Subject: powerpc/ps3: Add check for otheros image size The ps3's otheros flash loader has a size limit of 16 MiB for the uncompressed image. If that limit would be exceeded, output the flash image file as 'otheros-too-big.bld' instead. Signed-off-by: Geoff Levand Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/897c2a59-378e-7c9b-3976-d0a0def90913@infradead.org --- arch/powerpc/boot/wrapper | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index 35ace40d9fc2..d0b5f202c49c 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -571,7 +571,18 @@ ps3) count=$overlay_size bs=1 odir="$(dirname "$ofile.bin")" - rm -f "$odir/otheros.bld" - gzip -n --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld" + + # The ps3's flash loader has a size limit of 16 MiB for the uncompressed + # image. If a compressed image that exceeded this limit is written to + # flash the loader will decompress that image until the 16 MiB limit is + # reached, then enter the system reset vector of the partially decompressed + # image. No warning is issued. + rm -f "$odir"/{otheros,otheros-too-big}.bld + size=$(${CROSS}nm --no-sort --radix=d "$ofile" | egrep ' _end$' | cut -d' ' -f1) + bld="otheros.bld" + if [ $size -gt $((0x1000000)) ]; then + bld="otheros-too-big.bld" + fi + gzip -n --force -9 --stdout "$ofile.bin" > "$odir/$bld" ;; esac -- cgit v1.2.3-59-g8ed1b From ceffa63acce7165c442395b7d64a11ab8b5c5dca Mon Sep 17 00:00:00 2001 From: Chen Zhou Date: Sat, 9 May 2020 10:08:38 +0800 Subject: powerpc/powernv: add NULL check after kzalloc Fixes coccicheck warning: ./arch/powerpc/platforms/powernv/opal.c:813:1-5: alloc with no test, possible model on line 814 Add a NULL check after kzalloc.
Signed-off-by: Chen Zhou Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200509020838.121660-1-chenzhou10@huawei.com --- arch/powerpc/platforms/powernv/opal.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 2b3dfd0b6cdd..d95954ad4c0a 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -811,6 +811,10 @@ static int opal_add_one_export(struct kobject *parent, const char *export_name, goto out; attr = kzalloc(sizeof(*attr), GFP_KERNEL); + if (!attr) { + rc = -ENOMEM; + goto out; + } name = kstrdup(export_name, GFP_KERNEL); if (!name) { rc = -ENOMEM; -- cgit v1.2.3-59-g8ed1b From 9384e552aabb647ec22acb00181ca1715b0fcdfe Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 29 Apr 2020 17:02:47 +1000 Subject: powerpc/64s: Fix early_init_mmu section mismatch Christian reports: MODPOST vmlinux.o WARNING: modpost: vmlinux.o(.text.unlikely+0x1a0): Section mismatch in reference from the function .early_init_mmu() to the function .init.text:.radix__early_init_mmu() The function .early_init_mmu() references the function __init .radix__early_init_mmu(). This is often because .early_init_mmu lacks a __init annotation or the annotation of .radix__early_init_mmu is wrong. WARNING: modpost: vmlinux.o(.text.unlikely+0x1ac): Section mismatch in reference from the function .early_init_mmu() to the function .init.text:.hash__early_init_mmu() The function .early_init_mmu() references the function __init .hash__early_init_mmu(). This is often because .early_init_mmu lacks a __init annotation or the annotation of .hash__early_init_mmu is wrong. The compiler is uninlining early_init_mmu and not putting it in an init section because there is no annotation. Add it. Reported-by: Christian Zigotzky Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Tested-by: Christian Zigotzky Link: https://lore.kernel.org/r/20200429070247.1678172-1-npiggin@gmail.com --- arch/powerpc/include/asm/book3s/64/mmu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index f0a9ff690881..5393a535240c 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -211,7 +211,7 @@ void hash__early_init_devtree(void); void radix__early_init_devtree(void); extern void hash__early_init_mmu(void); extern void radix__early_init_mmu(void); -static inline void early_init_mmu(void) +static inline void __init early_init_mmu(void) { if (radix_enabled()) return radix__early_init_mmu(); -- cgit v1.2.3-59-g8ed1b From c2e929b18cea6cbf71364f22d742d9aad7f4677a Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Thu, 5 Mar 2020 23:48:52 -0500 Subject: powerpc/64s/pgtable: fix an undefined behaviour Booting a power9 server with hash MMU could trigger an undefined behaviour because pud_offset(p4d, 0) will do, 0 >> (PAGE_SHIFT:16 + PTE_INDEX_SIZE:8 + H_PMD_INDEX_SIZE:10) Fix it by converting pud_index() and friends to static inline functions. 
UBSAN: shift-out-of-bounds in arch/powerpc/mm/ptdump/ptdump.c:282:15 shift exponent 34 is too large for 32-bit type 'int' CPU: 6 PID: 1 Comm: swapper/0 Not tainted 5.6.0-rc4-next-20200303+ #13 Call Trace: dump_stack+0xf4/0x164 (unreliable) ubsan_epilogue+0x18/0x78 __ubsan_handle_shift_out_of_bounds+0x160/0x21c walk_pagetables+0x2cc/0x700 walk_pud at arch/powerpc/mm/ptdump/ptdump.c:282 (inlined by) walk_pagetables at arch/powerpc/mm/ptdump/ptdump.c:311 ptdump_check_wx+0x8c/0xf0 mark_rodata_ro+0x48/0x80 kernel_init+0x74/0x194 ret_from_kernel_thread+0x5c/0x74 Suggested-by: Christophe Leroy Signed-off-by: Qian Cai Signed-off-by: Michael Ellerman Reviewed-by: Christophe Leroy Link: https://lore.kernel.org/r/20200306044852.3236-1-cai@lca.pw --- arch/powerpc/include/asm/book3s/64/pgtable.h | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index e1f551159f7d..ec17fc343be0 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1003,10 +1003,25 @@ extern struct page *pgd_page(pgd_t pgd); #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) #define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS) -#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) -#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1)) -#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1)) -#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1)) +static inline unsigned long pgd_index(unsigned long address) +{ + return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1); +} + +static inline unsigned long pud_index(unsigned long address) +{ + return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); +} + +static inline unsigned long pmd_index(unsigned long address) +{ + return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); +} + +static inline unsigned long pte_index(unsigned long address) +{ + return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); +} /* * Find an entry in a page-table-directory. We combine the address region -- cgit v1.2.3-59-g8ed1b From 91ffeaa7e5dd62753e23a1204dc7ecd11f26eadc Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Mon, 13 Apr 2020 12:06:45 -0700 Subject: powerpc/wii: Fix declaration made after definition A 0day randconfig uncovered an error with clang, trimmed for brevity: arch/powerpc/platforms/embedded6xx/wii.c:195:7: error: attribute declaration must precede definition [-Werror,-Wignored-attributes] if (!machine_is(wii)) ^ The macro machine_is declares mach_##name but define_machine actually defines mach_##name, hence the warning. To fix this, move define_machine after the machine_is usage.
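The diagnostic is easy to reproduce outside the kernel; a minimal sketch (hypothetical code, not from the tree) is:

    struct machdep_calls { int dummy; };
    /* definition first, as expanded from define_machine(wii): */
    struct machdep_calls mach_wii;
    /* weak-attributed declaration afterwards, as expanded from machine_is(wii): */
    extern struct machdep_calls mach_wii __attribute__((weak));

The weak attribute on the later declaration cannot take effect because mach_wii has already been defined, which is what -Wignored-attributes complains about.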
Fixes: 5a7ee3198dfa ("powerpc: wii: platform support") Reported-by: kbuild test robot Signed-off-by: Nathan Chancellor Signed-off-by: Michael Ellerman Link: https://github.com/ClangBuiltLinux/linux/issues/989 Link: https://lore.kernel.org/r/20200413190644.16757-1-natechancellor@gmail.com --- arch/powerpc/platforms/embedded6xx/wii.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 67e48b0a164e..a802ef957d63 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -172,19 +172,6 @@ static void wii_shutdown(void) flipper_quiesce(); } -define_machine(wii) { - .name = "wii", - .probe = wii_probe, - .setup_arch = wii_setup_arch, - .restart = wii_restart, - .halt = wii_halt, - .init_IRQ = wii_pic_probe, - .get_irq = flipper_pic_get_irq, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, - .machine_shutdown = wii_shutdown, -}; - static const struct of_device_id wii_of_bus[] = { { .compatible = "nintendo,hollywood", }, { }, @@ -200,3 +187,15 @@ static int __init wii_device_probe(void) } device_initcall(wii_device_probe); +define_machine(wii) { + .name = "wii", + .probe = wii_probe, + .setup_arch = wii_setup_arch, + .restart = wii_restart, + .halt = wii_halt, + .init_IRQ = wii_pic_probe, + .get_irq = flipper_pic_get_irq, + .calibrate_decr = generic_calibrate_decr, + .progress = udbg_progress, + .machine_shutdown = wii_shutdown, +}; -- cgit v1.2.3-59-g8ed1b From b8707e2374f68cac79de553ae1ee5c35913813bd Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Wed, 25 Mar 2020 15:05:46 +1100 Subject: powerpc/tm: Document h/rfid and mtmsrd quirk The ISA has a quirk that's useful for the Linux implementation. Document it here so others are less likely to trip over it. Suggested-by: Michael Ellerman Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200325040546.3091563-1-mikey@neuling.org --- Documentation/powerpc/transactional_memory.rst | 27 ++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/Documentation/powerpc/transactional_memory.rst b/Documentation/powerpc/transactional_memory.rst index 09955103acb4..b5b09bf00966 100644 --- a/Documentation/powerpc/transactional_memory.rst +++ b/Documentation/powerpc/transactional_memory.rst @@ -245,3 +245,30 @@ POWER9N DD2.2. Guest migration from POWER8 to POWER9 will work with POWER9N DD2.2 and POWER9C DD1.2. Since earlier POWER9 processors don't support TM emulation, migration from POWER8 to POWER9 is not supported there. + +Kernel implementation +===================== + +h/rfid mtmsrd quirk +------------------- + +As defined in the ISA, rfid has a quirk which is useful in early +exception handling. When we are in a userspace transaction and enter the +kernel via some exception, the MSR will end up as TM=0 and TS=01 (ie. TM +off but TM suspended). Regularly the kernel will want to change bits in +the MSR and will perform an rfid to do this. In this case rfid can +have SRR1 TM = 0 and TS = 00 (ie. TM off and non-transactional) and the +resulting MSR will retain TM = 0 and TS=01 from before (ie. stay in +suspend). This is a quirk in the architecture as this would normally +be a transition from TS=01 to TS=00 (ie. suspend -> non-transactional) +which is an illegal transition.
This quirk is described in the architecture in the definition of rfid +with these lines: + + if (MSR 29:31 ¬ = 0b010 | SRR1 29:31 ¬ = 0b000) then + MSR 29:31 <- SRR1 29:31 + +hrfid and mtmsrd have the same quirk. + +The Linux kernel uses this quirk in its early exception handling. -- cgit v1.2.3-59-g8ed1b From a0594e89c9dc8e37883cc0d6642d1baad9c0744e Mon Sep 17 00:00:00 2001 From: Andrew Donnellan Date: Wed, 26 Feb 2020 15:39:23 +1100 Subject: ocxl: Fix misleading comment In ocxl_context_free() we note that the AFU reference we're releasing was taken in "ocxl_context_init", a function that no longer exists. Fix it to say ocxl_context_alloc() instead, which is the new name for ocxl_context_init(), since it was renamed. Fixes: b9721d275cc2 ("ocxl: Allow external drivers to use OpenCAPI contexts") Signed-off-by: Andrew Donnellan Acked-by: Frederic Barrat Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200226043923.5481-1-ajd@linux.ibm.com --- drivers/misc/ocxl/context.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c index de8a66b9d76b..c21f65a5c762 100644 --- a/drivers/misc/ocxl/context.c +++ b/drivers/misc/ocxl/context.c @@ -287,7 +287,7 @@ void ocxl_context_free(struct ocxl_context *ctx) ocxl_afu_irq_free_all(ctx); idr_destroy(&ctx->irq_idr); - /* reference to the AFU taken in ocxl_context_init */ + /* reference to the AFU taken in ocxl_context_alloc() */ ocxl_afu_put(ctx->afu); kfree(ctx); } -- cgit v1.2.3-59-g8ed1b From 82a1b8ed5604cccf30b6ff03bcd61640cd26369b Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 11 May 2020 22:58:24 +1000 Subject: powerpc/64s/hash: Add stress_slb kernel boot option to increase SLB faults This option increases the number of SLB misses by limiting the number of kernel SLB entries, and by increasing the flushing of cached lookaside information. This helps stress test difficult-to-hit paths in the kernel. Reported-by: kbuild test robot Signed-off-by: Nicholas Piggin [mpe: Relocate the code into arch/powerpc/mm, s/torture/stress/] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200511125825.3081305-1-mpe@ellerman.id.au --- Documentation/admin-guide/kernel-parameters.txt | 5 + arch/powerpc/mm/book3s64/hash_utils.c | 6 + arch/powerpc/mm/book3s64/internal.h | 16 +++ arch/powerpc/mm/book3s64/slb.c | 166 +++++++++++++++++------- 4 files changed, 148 insertions(+), 45 deletions(-) create mode 100644 arch/powerpc/mm/book3s64/internal.h diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index f2a93c8679e8..26ef1d74e642 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -871,6 +871,11 @@ can be useful when debugging issues that require an SLB miss to occur. + stress_slb [PPC] + Limits the number of kernel SLB entries, and flushes + them frequently to increase the rate of SLB faults + on kernel addresses. + disable= [IPV6] See Documentation/networking/ipv6.txt. diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 3d727f73a8db..622c6e8e9fa6 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -66,6 +66,9 @@ #include +#include "internal.h" + + #ifdef DEBUG #define DBG(fmt...)
udbg_printf(fmt) #else @@ -870,6 +873,9 @@ static void __init htab_initialize(void) printk(KERN_INFO "Using 1TB segments\n"); } + if (stress_slb_enabled) + static_branch_enable(&stress_slb_key); + /* * Calculate the required size of the htab. We want the number of * PTEGs to equal one half the number of real pages. diff --git a/arch/powerpc/mm/book3s64/internal.h b/arch/powerpc/mm/book3s64/internal.h new file mode 100644 index 000000000000..7eda0d30d765 --- /dev/null +++ b/arch/powerpc/mm/book3s64/internal.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H +#define ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H + +#include + +extern bool stress_slb_enabled; + +DECLARE_STATIC_KEY_FALSE(stress_slb_key); + +static inline bool stress_slb(void) +{ + return static_branch_unlikely(&stress_slb_key); +} + +#endif /* ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H */ diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c index 716204aee3da..8141e8b40ee5 100644 --- a/arch/powerpc/mm/book3s64/slb.c +++ b/arch/powerpc/mm/book3s64/slb.c @@ -25,6 +25,9 @@ #include #include +#include "internal.h" + + enum slb_index { LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */ KSTACK_INDEX = 1, /* Kernel stack map */ @@ -54,6 +57,17 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags); } +bool stress_slb_enabled __initdata; + +static int __init parse_stress_slb(char *p) +{ + stress_slb_enabled = true; + return 0; +} +early_param("stress_slb", parse_stress_slb); + +__ro_after_init DEFINE_STATIC_KEY_FALSE(stress_slb_key); + static void assert_slb_presence(bool present, unsigned long ea) { #ifdef CONFIG_DEBUG_VM @@ -68,7 +82,7 @@ static void assert_slb_presence(bool present, unsigned long ea) * slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware * ignores all other bits from 0-27, so just clear them all. */ - ea &= ~((1UL << 28) - 1); + ea &= ~((1UL << SID_SHIFT) - 1); asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0"); WARN_ON(present == (tmp == 0)); @@ -153,14 +167,42 @@ void slb_flush_all_realmode(void) asm volatile("slbmte %0,%0; slbia" : : "r" (0)); } +static __always_inline void __slb_flush_and_restore_bolted(bool preserve_kernel_lookaside) +{ + struct slb_shadow *p = get_slb_shadow(); + unsigned long ksp_esid_data, ksp_vsid_data; + u32 ih; + + /* + * SLBIA IH=1 on ISA v2.05 and newer processors may preserve lookaside + * information created with Class=0 entries, which we use for kernel + * SLB entries (the SLB entries themselves are still invalidated). + * + * Older processors will ignore this optimisation. Over-invalidation + * is fine because we never rely on lookaside information existing. + */ + if (preserve_kernel_lookaside) + ih = 1; + else + ih = 0; + + ksp_esid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].esid); + ksp_vsid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].vsid); + + asm volatile(PPC_SLBIA(%0)" \n" + "slbmte %1, %2 \n" + :: "i" (ih), + "r" (ksp_vsid_data), + "r" (ksp_esid_data) + : "memory"); +} + /* * This flushes non-bolted entries, it can be run in virtual mode. Must * be called with interrupts disabled. 
*/ void slb_flush_and_restore_bolted(void) { - struct slb_shadow *p = get_slb_shadow(); - BUILD_BUG_ON(SLB_NUM_BOLTED != 2); WARN_ON(!irqs_disabled()); @@ -171,13 +213,10 @@ void slb_flush_and_restore_bolted(void) */ hard_irq_disable(); - asm volatile("isync\n" - "slbia\n" - "slbmte %0, %1\n" - "isync\n" - :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)), - "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid)) - : "memory"); + isync(); + __slb_flush_and_restore_bolted(false); + isync(); + assert_slb_presence(true, get_paca()->kstack); get_paca()->slb_cache_ptr = 0; @@ -400,6 +439,30 @@ void preload_new_slb_context(unsigned long start, unsigned long sp) local_irq_enable(); } +static void slb_cache_slbie_kernel(unsigned int index) +{ + unsigned long slbie_data = get_paca()->slb_cache[index]; + unsigned long ksp = get_paca()->kstack; + + slbie_data <<= SID_SHIFT; + slbie_data |= 0xc000000000000000ULL; + if ((ksp & slb_esid_mask(mmu_kernel_ssize)) == slbie_data) + return; + slbie_data |= mmu_kernel_ssize << SLBIE_SSIZE_SHIFT; + + asm volatile("slbie %0" : : "r" (slbie_data)); +} + +static void slb_cache_slbie_user(unsigned int index) +{ + unsigned long slbie_data = get_paca()->slb_cache[index]; + + slbie_data <<= SID_SHIFT; + slbie_data |= user_segment_size(slbie_data) << SLBIE_SSIZE_SHIFT; + slbie_data |= SLBIE_C; /* user slbs have C=1 */ + + asm volatile("slbie %0" : : "r" (slbie_data)); +} /* Flush all user entries from the segment table of the current processor. */ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) @@ -414,8 +477,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) * which would update the slb_cache/slb_cache_ptr fields in the PACA. */ hard_irq_disable(); - asm volatile("isync" : : : "memory"); - if (cpu_has_feature(CPU_FTR_ARCH_300)) { + isync(); + if (stress_slb()) { + __slb_flush_and_restore_bolted(false); + isync(); + get_paca()->slb_cache_ptr = 0; + get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; + + } else if (cpu_has_feature(CPU_FTR_ARCH_300)) { /* * SLBIA IH=3 invalidates all Class=1 SLBEs and their * associated lookaside structures, which matches what @@ -423,47 +492,29 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) * cache. */ asm volatile(PPC_SLBIA(3)); + } else { unsigned long offset = get_paca()->slb_cache_ptr; if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) && offset <= SLB_CACHE_ENTRIES) { - unsigned long slbie_data = 0; - - for (i = 0; i < offset; i++) { - unsigned long ea; - - ea = (unsigned long) - get_paca()->slb_cache[i] << SID_SHIFT; - /* - * Could assert_slb_presence(true) here, but - * hypervisor or machine check could have come - * in and removed the entry at this point. - */ - - slbie_data = ea; - slbie_data |= user_segment_size(slbie_data) - << SLBIE_SSIZE_SHIFT; - slbie_data |= SLBIE_C; /* user slbs have C=1 */ - asm volatile("slbie %0" : : "r" (slbie_data)); - } + /* + * Could assert_slb_presence(true) here, but + * hypervisor or machine check could have come + * in and removed the entry at this point. 
+ */ + + for (i = 0; i < offset; i++) + slb_cache_slbie_user(i); /* Workaround POWER5 < DD2.1 issue */ if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1) - asm volatile("slbie %0" : : "r" (slbie_data)); + slb_cache_slbie_user(0); } else { - struct slb_shadow *p = get_slb_shadow(); - unsigned long ksp_esid_data = - be64_to_cpu(p->save_area[KSTACK_INDEX].esid); - unsigned long ksp_vsid_data = - be64_to_cpu(p->save_area[KSTACK_INDEX].vsid); - - asm volatile(PPC_SLBIA(1) "\n" - "slbmte %0,%1\n" - "isync" - :: "r"(ksp_vsid_data), - "r"(ksp_esid_data)); + /* Flush but retain kernel lookaside information */ + __slb_flush_and_restore_bolted(true); + isync(); get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; } @@ -503,7 +554,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) * address accesses by the kernel (user mode won't happen until * rfid, which is safe). */ - asm volatile("isync" : : : "memory"); + isync(); } void slb_set_size(u16 size) @@ -571,6 +622,9 @@ static void slb_cache_update(unsigned long esid_data) if (cpu_has_feature(CPU_FTR_ARCH_300)) return; /* ISAv3.0B and later does not use slb_cache */ + if (stress_slb()) + return; + /* * Now update slb cache entries */ @@ -580,7 +634,7 @@ static void slb_cache_update(unsigned long esid_data) * We have space in slb cache for optimized switch_slb(). * Top 36 bits from esid_data as per ISA */ - local_paca->slb_cache[slb_cache_index++] = esid_data >> 28; + local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT; local_paca->slb_cache_ptr++; } else { /* @@ -671,6 +725,28 @@ static long slb_insert_entry(unsigned long ea, unsigned long context, * accesses user memory before it returns to userspace with rfid. */ assert_slb_presence(false, ea); + if (stress_slb()) { + int slb_cache_index = local_paca->slb_cache_ptr; + + /* + * stress_slb() does not use slb cache, repurpose as a + * cache of inserted (non-bolted) kernel SLB entries. All + * non-bolted kernel entries are flushed on any user fault, + * or if there are already 3 non-bolted kernel entries. + */ + BUILD_BUG_ON(SLB_CACHE_ENTRIES < 3); + if (!kernel || slb_cache_index == 3) { + int i; + + for (i = 0; i < slb_cache_index; i++) + slb_cache_slbie_kernel(i); + slb_cache_index = 0; + } + + if (kernel) + local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT; + local_paca->slb_cache_ptr = slb_cache_index; + } asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)); barrier(); -- cgit v1.2.3-59-g8ed1b From d132443a73d7a131775df46f33000f67ed92de1e Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:43 +0000 Subject: powerpc/kasan: Fix error detection on memory allocation In case (k_start & PAGE_MASK) doesn't equal k_start, 'va' will never be NULL although 'block' is NULL. Check the return of memblock_alloc() directly instead of the resulting address in the loop.
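A short worked example (made-up address, not part of the patch) shows why the old test was dead:

    block  = NULL, k_start = 0xf8000123              (not page aligned)
    k_cur  = k_start & PAGE_MASK = 0xf8000000
    va     = block + k_cur - k_start = (void *)-0x123    /* != NULL */

Every later iteration only adds PAGE_SIZE, so 'if (!va)' never fires even though the allocation failed.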
Fixes: 509cd3f2b473 ("powerpc/32: Simplify KASAN init") Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/7cb8ca82042bfc45a5cfe726c921cd7e7eeb12a3.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/kasan/kasan_init_32.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index cbcad369fcb2..8b15fe09b967 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -76,15 +76,14 @@ static int __init kasan_init_region(void *start, size_t size) return ret; block = memblock_alloc(k_end - k_start, PAGE_SIZE); + if (!block) + return -ENOMEM; for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_ptr_k(k_cur); void *va = block + k_cur - k_start; pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL); - if (!va) - return -ENOMEM; - __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0); } flush_tlb_kernel_range(k_start, k_end); -- cgit v1.2.3-59-g8ed1b From 3a66a24f6060e6775f8c02ac52329ea0152d7e58 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:44 +0000 Subject: powerpc/kasan: Fix issues by lowering KASAN_SHADOW_END At the time being, KASAN_SHADOW_END is 0x100000000, which is 0 in a 32-bit representation. This leads to a couple of issues: - kasan_remap_early_shadow_ro() does nothing because the comparison k_cur < k_end is always false. - In ptdump, the address comparison for the markers display fails and the marker's name is printed at the start of the KASAN area instead of being printed at the end. However, there is no need to shadow the KASAN shadow area itself, so the KASAN shadow area can stop shadowing memory at the start of itself. With PAGE_OFFSET set to 0xc0000000, the KASAN shadow area then goes from 0xf8000000 to 0xff000000. Fixes: cbd18991e24f ("powerpc/mm: Fix an Oops in kasan_mmu_init()") Cc: stable@vger.kernel.org Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/ae1a3c0d19a37410c209c3fc453634cfcc0ee318.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/kasan.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index fbff9ff9032e..fc900937f653 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -23,9 +23,7 @@ #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET) -#define KASAN_SHADOW_END 0UL - -#define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START) +#define KASAN_SHADOW_END (-(-KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT)) #ifdef CONFIG_KASAN void kasan_early_init(void); -- cgit v1.2.3-59-g8ed1b From d2a91cef9bbdeb87b7449fdab1a6be6000930210 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:45 +0000 Subject: powerpc/kasan: Fix shadow pages allocation failure Doing KASAN page allocation in MMU_init() is too early: the kernel doesn't yet have access to the entire memory space, and memblock_alloc() fails when the kernel is a bit big. Do it from kasan_init() instead.
Fixes: 2edb16efc899 ("powerpc/32: Add KASAN support") Cc: stable@vger.kernel.org Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/c24163ee5d5f8cdf52fefa45055ceb35435b8f15.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/kasan.h | 2 -- arch/powerpc/mm/init_32.c | 2 -- arch/powerpc/mm/kasan/kasan_init_32.c | 4 +++- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index fc900937f653..4769bbf7173a 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -27,12 +27,10 @@ #ifdef CONFIG_KASAN void kasan_early_init(void); -void kasan_mmu_init(void); void kasan_init(void); void kasan_late_init(void); #else static inline void kasan_init(void) { } -static inline void kasan_mmu_init(void) { } static inline void kasan_late_init(void) { } #endif diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 872df48ae41b..a6991ef8727d 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -170,8 +170,6 @@ void __init MMU_init(void) btext_unmap(); #endif - kasan_mmu_init(); - setup_kup(); /* Shortly after that, the entire linear mapping will be available */ diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index 8b15fe09b967..b7c287adfd59 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -131,7 +131,7 @@ static void __init kasan_unmap_early_shadow_vmalloc(void) flush_tlb_kernel_range(k_start, k_end); } -void __init kasan_mmu_init(void) +static void __init kasan_mmu_init(void) { int ret; struct memblock_region *reg; @@ -159,6 +159,8 @@ void __init kasan_mmu_init(void) void __init kasan_init(void) { + kasan_mmu_init(); + kasan_remap_early_shadow_ro(); clear_page(kasan_early_shadow_page); -- cgit v1.2.3-59-g8ed1b From 7c31c05e00fc5ff2067332c5f80e525573e7269c Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:46 +0000 Subject: powerpc/kasan: Remove unnecessary page table locking Commit 45ff3c559585 ("powerpc/kasan: Fix parallel loading of modules.") added spinlocks to manage parallel module loading. Since then commit 47febbeeec44 ("powerpc/32: Force KASAN_VMALLOC for modules") converted the module loading to KASAN_VMALLOC. The spinlocking has then become unneeded and can be removed to simplify kasan_init_shadow_page_tables(). Also remove the inclusion of linux/moduleloader.h and linux/vmalloc.h, which are not needed anymore since the removal of module management.
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/81a4d3aee8b82bc1355595935c8f4ad9d3b22a83.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/kasan/kasan_init_32.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index b7c287adfd59..91e2ade75192 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -5,9 +5,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -34,31 +32,22 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned { pmd_t *pmd; unsigned long k_cur, k_next; - pte_t *new = NULL; pmd = pmd_ptr_k(k_start); for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) { + pte_t *new; + k_next = pgd_addr_end(k_cur, k_end); if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte) continue; - if (!new) - new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE); + new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE); if (!new) return -ENOMEM; kasan_populate_pte(new, PAGE_KERNEL); - - smp_wmb(); /* See comment in __pte_alloc */ - - spin_lock(&init_mm.page_table_lock); - /* Has another populated it ? */ - if (likely((void *)pmd_page_vaddr(*pmd) == kasan_early_shadow_pte)) { - pmd_populate_kernel(&init_mm, pmd, new); - new = NULL; - } - spin_unlock(&init_mm.page_table_lock); + pmd_populate_kernel(&init_mm, pmd, new); } return 0; } -- cgit v1.2.3-59-g8ed1b From 7dec42ab57f2f59feba82abf0353164479bfde4c Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:47 +0000 Subject: powerpc/kasan: Refactor update of early shadow mappings kasan_remap_early_shadow_ro() and kasan_unmap_early_shadow_vmalloc() are both updating the early shadow mapping: the first one sets the mapping read-only while the other clears the mapping. 
Refactor and create kasan_update_early_region() Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/8c496c0828de2608c7c940c45525d177e91b6f1b.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/kasan/kasan_init_32.c | 39 ++++++++++++++++------------------- 1 file changed, 18 insertions(+), 21 deletions(-) diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index 91e2ade75192..10481d904fea 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -79,45 +79,42 @@ static int __init kasan_init_region(void *start, size_t size) return 0; } -static void __init kasan_remap_early_shadow_ro(void) +static void __init +kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte) { - pgprot_t prot = kasan_prot_ro(); - unsigned long k_start = KASAN_SHADOW_START; - unsigned long k_end = KASAN_SHADOW_END; unsigned long k_cur; phys_addr_t pa = __pa(kasan_early_shadow_page); - kasan_populate_pte(kasan_early_shadow_pte, prot); - - for (k_cur = k_start & PAGE_MASK; k_cur != k_end; k_cur += PAGE_SIZE) { + for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_ptr_k(k_cur); pte_t *ptep = pte_offset_kernel(pmd, k_cur); if ((pte_val(*ptep) & PTE_RPN_MASK) != pa) continue; - __set_pte_at(&init_mm, k_cur, ptep, pfn_pte(PHYS_PFN(pa), prot), 0); + __set_pte_at(&init_mm, k_cur, ptep, pte, 0); } - flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END); + + flush_tlb_kernel_range(k_start, k_end); } -static void __init kasan_unmap_early_shadow_vmalloc(void) +static void __init kasan_remap_early_shadow_ro(void) { - unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START); - unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END); - unsigned long k_cur; + pgprot_t prot = kasan_prot_ro(); phys_addr_t pa = __pa(kasan_early_shadow_page); - for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) { - pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur); - pte_t *ptep = pte_offset_kernel(pmd, k_cur); + kasan_populate_pte(kasan_early_shadow_pte, prot); - if ((pte_val(*ptep) & PTE_RPN_MASK) != pa) - continue; + kasan_update_early_region(KASAN_SHADOW_START, KASAN_SHADOW_END, + pfn_pte(PHYS_PFN(pa), prot)); +} - __set_pte_at(&init_mm, k_cur, ptep, __pte(0), 0); - } - flush_tlb_kernel_range(k_start, k_end); +static void __init kasan_unmap_early_shadow_vmalloc(void) +{ + unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START); + unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END); + + kasan_update_early_region(k_start, k_end, __pte(0)); } static void __init kasan_mmu_init(void) -- cgit v1.2.3-59-g8ed1b From ec97d022f621c6c850aec46d8818b49c6aae95ad Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:48 +0000 Subject: powerpc/kasan: Declare kasan_init_region() weak In order to allow sub-arches to allocate KASAN regions using optimised methods (huge pages on 8xx, BATs on BOOK3S, ...), declare kasan_init_region() weak. Also make kasan_init_shadow_page_tables() accessible from outside, so that it can be called from the specific kasan_init_region() functions if needed. And populate the remaining KASAN address space only once the region mapping has been performed, to allow 8xx to allocate hugepd instead of standard page tables for mapping via 8M hugepages.
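A platform override of the weak symbol could then be shaped like the following sketch (hypothetical code, not part of this series; the real per-platform implementations are added by later patches not shown here, and a functional version would also map backing pages for the range with its optimised primitive):

    int __init kasan_init_region(void *start, size_t size)
    {
    	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
    	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);

    	/* A sub-arch would map k_start..k_end with 8M hugepages, BATs,
    	 * etc. here; this sketch only sets up ordinary shadow page
    	 * tables by delegating to the now-exported generic helper. */
    	return kasan_init_shadow_page_tables(k_start, k_end);
    }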
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/3c1ce419fa1b5a4171b92d7fb16455ca17e1b96d.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/kasan.h | 3 +++ arch/powerpc/mm/kasan/kasan_init_32.c | 21 +++++++++++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index 4769bbf7173a..107a24c3f7b3 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -34,5 +34,8 @@ static inline void kasan_init(void) { } static inline void kasan_late_init(void) { } #endif +int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end); +int kasan_init_region(void *start, size_t size); + #endif /* __ASSEMBLY */ #endif diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index 10481d904fea..76d418af4ce8 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -28,7 +28,7 @@ static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot) __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0); } -static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end) +int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end) { pmd_t *pmd; unsigned long k_cur, k_next; @@ -52,7 +52,7 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned return 0; } -static int __init kasan_init_region(void *start, size_t size) +int __init __weak kasan_init_region(void *start, size_t size) { unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start); unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size); @@ -122,14 +122,6 @@ static void __init kasan_mmu_init(void) int ret; struct memblock_region *reg; - if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) || - IS_ENABLED(CONFIG_KASAN_VMALLOC)) { - ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END); - - if (ret) - panic("kasan: kasan_init_shadow_page_tables() failed"); - } - for_each_memblock(memory, reg) { phys_addr_t base = reg->base; phys_addr_t top = min(base + reg->size, total_lowmem); @@ -141,6 +133,15 @@ static void __init kasan_mmu_init(void) if (ret) panic("kasan: kasan_init_region() failed"); } + + if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) || + IS_ENABLED(CONFIG_KASAN_VMALLOC)) { + ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END); + + if (ret) + panic("kasan: kasan_init_shadow_page_tables() failed"); + } + } void __init kasan_init(void) -- cgit v1.2.3-59-g8ed1b From 3af4786eb429b2df76cbd7ce3bae21467ac3e4fb Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:51 +0000 Subject: powerpc/ptdump: Add _PAGE_COHERENT flag For platforms using shared.c (4xx, Book3e, Book3s/32), also handle the _PAGE_COHERENT flag which corresponds to the M bit of the WIMG flags. 
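The ptdump flag table works by masking the PTE and comparing against an expected value; each entry prints its .set string on match, otherwise the .clear padding so the columns stay aligned. A standalone sketch of that dispatch (the bit value is invented; the real _PAGE_COHERENT differs per MMU family):

#include <stdio.h>
#include <stdint.h>

struct flag_info {
	uint64_t mask, val;
	const char *set, *clear;
};

#define MY_PAGE_COHERENT 0x8	/* illustrative bit only */

static const struct flag_info flags[] = {
	{ MY_PAGE_COHERENT, MY_PAGE_COHERENT, "coherent", "        " },
};

int main(void)
{
	uint64_t pte = MY_PAGE_COHERENT;
	unsigned int i;

	for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++)
		puts((pte & flags[i].mask) == flags[i].val ?
		     flags[i].set : flags[i].clear);
	return 0;
}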
Signed-off-by: Christophe Leroy [mpe: Make it more verbose, use "coherent" rather than "m"] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/324c3d860717e8e91fca3bb6c0f8b23e1644a404.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/ptdump/shared.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c index f7ed2f187cb0..784f8df17f73 100644 --- a/arch/powerpc/mm/ptdump/shared.c +++ b/arch/powerpc/mm/ptdump/shared.c @@ -30,6 +30,11 @@ static const struct flag_info flag_array[] = { .val = _PAGE_PRESENT, .set = "present", .clear = " ", + }, { + .mask = _PAGE_COHERENT, + .val = _PAGE_COHERENT, + .set = "coherent", + .clear = " ", }, { .mask = _PAGE_GUARDED, .val = _PAGE_GUARDED, -- cgit v1.2.3-59-g8ed1b From 6b30830e2003d9d77696084ebe2fc19dbe7d6f70 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:52 +0000 Subject: powerpc/ptdump: Display size of BATs Display the size of areas mapped with BATs. For that, the size display for pages is refactored. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/acf764eee231f0358e66ca9e819f052804055acc.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/ptdump/bats.c | 4 ++++ arch/powerpc/mm/ptdump/ptdump.c | 23 ++++++++++++++--------- arch/powerpc/mm/ptdump/ptdump.h | 3 +++ 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/mm/ptdump/bats.c b/arch/powerpc/mm/ptdump/bats.c index d3a5d6b318d1..d6c660f63d71 100644 --- a/arch/powerpc/mm/ptdump/bats.c +++ b/arch/powerpc/mm/ptdump/bats.c @@ -10,6 +10,8 @@ #include #include +#include "ptdump.h" + static char *pp_601(int k, int pp) { if (pp == 0) @@ -42,6 +44,7 @@ static void bat_show_601(struct seq_file *m, int idx, u32 lower, u32 upper) #else seq_printf(m, "0x%08x ", pbn); #endif + pt_dump_size(m, size); seq_printf(m, "Kernel %s User %s", pp_601(k & 2, pp), pp_601(k & 1, pp)); @@ -88,6 +91,7 @@ static void bat_show_603(struct seq_file *m, int idx, u32 lower, u32 upper, bool #else seq_printf(m, "0x%08x ", brpn); #endif + pt_dump_size(m, size); if (k == 1) seq_puts(m, "User "); diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index d92bb8ea229c..1f97668853e3 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -112,6 +112,19 @@ static struct addr_marker address_markers[] = { seq_putc(m, c); \ }) +void pt_dump_size(struct seq_file *m, unsigned long size) +{ + static const char units[] = "KMGTPE"; + const char *unit = units; + + /* Work out what appropriate unit to use */ + while (!(size & 1023) && unit[1]) { + size >>= 10; + unit++; + } + pt_dump_seq_printf(m, "%9lu%c ", size, *unit); +} + static void dump_flag_info(struct pg_state *st, const struct flag_info *flag, u64 pte, int num) { @@ -146,8 +159,6 @@ static void dump_flag_info(struct pg_state *st, const struct flag_info static void dump_addr(struct pg_state *st, unsigned long addr) { - static const char units[] = "KMGTPE"; - const char *unit = units; unsigned long delta; #ifdef CONFIG_PPC64 @@ -164,13 +175,7 @@ static void dump_addr(struct pg_state *st, unsigned long addr) pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa); delta = (addr - st->start_address) >> 10; } - /* Work out what appropriate unit to use */ - while (!(delta & 1023) && unit[1]) { - delta >>= 10; - unit++; - } - pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit); - + pt_dump_size(st->seq, delta); } static void
note_prot_wx(struct pg_state *st, unsigned long addr) diff --git a/arch/powerpc/mm/ptdump/ptdump.h b/arch/powerpc/mm/ptdump/ptdump.h index 5d513636de73..154efae96ae0 100644 --- a/arch/powerpc/mm/ptdump/ptdump.h +++ b/arch/powerpc/mm/ptdump/ptdump.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include +#include struct flag_info { u64 mask; @@ -17,3 +18,5 @@ struct pgtable_level { }; extern struct pgtable_level pg_level[5]; + +void pt_dump_size(struct seq_file *m, unsigned long delta); -- cgit v1.2.3-59-g8ed1b From 8961a2a5353cca5451f648f4838cd848a3b2354c Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:53 +0000 Subject: powerpc/ptdump: Standardise display of BAT flags Display BAT flags the same way as page flags: rwx and wimg Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/a07585f353c167b8db9597d83f992a5cb4fbf4c4.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/ptdump/bats.c | 37 +++++++++++++++---------------------- 1 file changed, 15 insertions(+), 22 deletions(-) diff --git a/arch/powerpc/mm/ptdump/bats.c b/arch/powerpc/mm/ptdump/bats.c index d6c660f63d71..cebb58c7e289 100644 --- a/arch/powerpc/mm/ptdump/bats.c +++ b/arch/powerpc/mm/ptdump/bats.c @@ -15,12 +15,12 @@ static char *pp_601(int k, int pp) { if (pp == 0) - return k ? "NA" : "RWX"; + return k ? " " : "rwx"; if (pp == 1) - return k ? "ROX" : "RWX"; + return k ? "r x" : "rwx"; if (pp == 2) - return k ? "RWX" : "RWX"; - return k ? "ROX" : "ROX"; + return "rwx"; + return "r x"; } static void bat_show_601(struct seq_file *m, int idx, u32 lower, u32 upper) @@ -48,12 +48,9 @@ static void bat_show_601(struct seq_file *m, int idx, u32 lower, u32 upper) seq_printf(m, "Kernel %s User %s", pp_601(k & 2, pp), pp_601(k & 1, pp)); - if (lower & _PAGE_WRITETHRU) - seq_puts(m, "write through "); - if (lower & _PAGE_NO_CACHE) - seq_puts(m, "no cache "); - if (lower & _PAGE_COHERENT) - seq_puts(m, "coherent "); + seq_puts(m, lower & _PAGE_WRITETHRU ? "w " : " "); + seq_puts(m, lower & _PAGE_NO_CACHE ? "i " : " "); + seq_puts(m, lower & _PAGE_COHERENT ? "m " : " "); seq_puts(m, "\n"); } @@ -101,20 +98,16 @@ static void bat_show_603(struct seq_file *m, int idx, u32 lower, u32 upper, bool seq_puts(m, "Kernel/User "); if (lower & BPP_RX) - seq_puts(m, is_d ? "RO " : "EXEC "); + seq_puts(m, is_d ? "r " : " x "); else if (lower & BPP_RW) - seq_puts(m, is_d ? "RW " : "EXEC "); + seq_puts(m, is_d ? "rw " : " x "); else - seq_puts(m, is_d ? "NA " : "NX "); - - if (lower & _PAGE_WRITETHRU) - seq_puts(m, "write through "); - if (lower & _PAGE_NO_CACHE) - seq_puts(m, "no cache "); - if (lower & _PAGE_COHERENT) - seq_puts(m, "coherent "); - if (lower & _PAGE_GUARDED) - seq_puts(m, "guarded "); + seq_puts(m, is_d ? " " : " "); + + seq_puts(m, lower & _PAGE_WRITETHRU ? "w " : " "); + seq_puts(m, lower & _PAGE_NO_CACHE ? "i " : " "); + seq_puts(m, lower & _PAGE_COHERENT ? "m " : " "); + seq_puts(m, lower & _PAGE_GUARDED ? "g " : " "); seq_puts(m, "\n"); } -- cgit v1.2.3-59-g8ed1b From b00ff6d8c1c3898b0f768cbb38ef722d25bd2f39 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:54 +0000 Subject: powerpc/ptdump: Properly handle non standard page size In order to properly display information regardless of the page size, it is necessary to take into account real page size. 
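The bug being fixed is easy to reproduce in isolation: the "same physical page mapped across the whole range" test only works if it uses the mapping's real page size rather than the 4k PAGE_SIZE constant. A small standalone sketch with assumed addresses:

#include <stdio.h>

#define PAGE_4K	0x1000UL
#define PAGE_8M	0x800000UL

/* mirrors the start_pa == last_pa && start + page_size != end test */
static int same_phys_page_repeated(unsigned long start, unsigned long end,
				   unsigned long start_pa, unsigned long last_pa,
				   unsigned long page_size)
{
	return start_pa == last_pa && start + page_size != end;
}

int main(void)
{
	/* a single 8M hugepage at 0xc0000000: */
	printf("with 4k page size: %d (wrongly flagged)\n",
	       same_phys_page_repeated(0xc0000000UL, 0xc0800000UL, 0, 0, PAGE_4K));
	printf("with real 8M size: %d (correct)\n",
	       same_phys_page_repeated(0xc0000000UL, 0xc0800000UL, 0, 0, PAGE_8M));
	return 0;
}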
Fixes: cabe8138b23c ("powerpc: dump as a single line areas mapping a single physical page.") Cc: stable@vger.kernel.org Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/a53b2a0ffd042a8d85464bf90d55bc5b970e00a1.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/ptdump/ptdump.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index 1f97668853e3..98d82dcf6f0b 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -60,6 +60,7 @@ struct pg_state { unsigned long start_address; unsigned long start_pa; unsigned long last_pa; + unsigned long page_size; unsigned int level; u64 current_flags; bool check_wx; @@ -168,9 +169,9 @@ static void dump_addr(struct pg_state *st, unsigned long addr) #endif pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1); - if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) { + if (st->start_pa == st->last_pa && st->start_address + st->page_size != addr) { pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa); - delta = PAGE_SIZE >> 10; + delta = st->page_size >> 10; } else { pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa); delta = (addr - st->start_address) >> 10; @@ -195,7 +196,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr) } static void note_page(struct pg_state *st, unsigned long addr, - unsigned int level, u64 val) + unsigned int level, u64 val, unsigned long page_size) { u64 flag = val & pg_level[level].mask; u64 pa = val & PTE_RPN_MASK; @@ -207,6 +208,7 @@ static void note_page(struct pg_state *st, unsigned long addr, st->start_address = addr; st->start_pa = pa; st->last_pa = pa; + st->page_size = page_size; pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); /* * Dump the section of virtual memory when: @@ -218,7 +220,7 @@ static void note_page(struct pg_state *st, unsigned long addr, */ } else if (flag != st->current_flags || level != st->level || addr >= st->marker[1].start_address || - (pa != st->last_pa + PAGE_SIZE && + (pa != st->last_pa + st->page_size && (pa != st->start_pa || st->start_pa != st->last_pa))) { /* Check the PTE flags */ @@ -246,6 +248,7 @@ static void note_page(struct pg_state *st, unsigned long addr, st->start_address = addr; st->start_pa = pa; st->last_pa = pa; + st->page_size = page_size; st->current_flags = flag; st->level = level; } else { @@ -261,7 +264,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) for (i = 0; i < PTRS_PER_PTE; i++, pte++) { addr = start + i * PAGE_SIZE; - note_page(st, addr, 4, pte_val(*pte)); + note_page(st, addr, 4, pte_val(*pte), PAGE_SIZE); } } @@ -278,7 +281,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) /* pmd exists */ walk_pte(st, pmd, addr); else - note_page(st, addr, 3, pmd_val(*pmd)); + note_page(st, addr, 3, pmd_val(*pmd), PMD_SIZE); } } @@ -294,7 +297,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) /* pud exists */ walk_pmd(st, pud, addr); else - note_page(st, addr, 2, pud_val(*pud)); + note_page(st, addr, 2, pud_val(*pud), PUD_SIZE); } } @@ -313,7 +316,7 @@ static void walk_pagetables(struct pg_state *st) /* pgd exists */ walk_pud(st, pgd, addr); else - note_page(st, addr, 1, pgd_val(*pgd)); + note_page(st, addr, 1, pgd_val(*pgd), PGDIR_SIZE); } } @@ -368,7 +371,7 @@ static int ptdump_show(struct seq_file *m, void *v) /* Traverse 
kernel page tables */ walk_pagetables(&st); - note_page(&st, 0, 0, 0); + note_page(&st, 0, 0, 0, 0); return 0; } -- cgit v1.2.3-59-g8ed1b From 6b789a26d7da2e0256d199da980369ef8fb49ec6 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:55 +0000 Subject: powerpc/ptdump: Handle hugepd at PGD level The 8xx is about to map kernel linear space and IMMR using huge pages. In order to display those pages properly, ptdump needs to handle hugepd tables. For the time being, do it only at PGD level; further patches may add handling of hugepd tables at lower levels for other platforms when needed. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/630728289158dcfeb06b14d40ed7c4c4e7148cf1.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/ptdump/ptdump.c | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index 98d82dcf6f0b..5fc880e30175 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -269,6 +270,26 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) } } +static void walk_hugepd(struct pg_state *st, hugepd_t *phpd, unsigned long start, + int pdshift, int level) +{ +#ifdef CONFIG_ARCH_HAS_HUGEPD + unsigned int i; + int shift = hugepd_shift(*phpd); + int ptrs_per_hpd = pdshift - shift > 0 ? 1 << (pdshift - shift) : 1; + + if (start & ((1 << shift) - 1)) + return; + + for (i = 0; i < ptrs_per_hpd; i++) { + unsigned long addr = start + (i << shift); + pte_t *pte = hugepte_offset(*phpd, addr, pdshift); + + note_page(st, addr, level + 1, pte_val(*pte), 1 << shift); + } +#endif +} + static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) { pmd_t *pmd = pmd_offset(pud, 0); @@ -312,11 +333,13 @@ static void walk_pagetables(struct pg_state *st) * the hash pagetable. */ for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) { - if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd)) + if (pgd_none(*pgd) || pgd_is_leaf(*pgd)) + note_page(st, addr, 1, pgd_val(*pgd), PGDIR_SIZE); + else if (is_hugepd(__hugepd(pgd_val(*pgd)))) + walk_hugepd(st, (hugepd_t *)pgd, addr, PGDIR_SHIFT, 1); + else /* pgd exists */ walk_pud(st, pgd, addr); - else - note_page(st, addr, 1, pgd_val(*pgd), PGDIR_SIZE); } } -- cgit v1.2.3-59-g8ed1b From 4b19f96a81bceaf0bcf44d79c0855c61158065ec Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:56 +0000 Subject: powerpc/32s: Don't warn when mapping RO data ROX. Mapping RO data as ROX is not an issue since that data cannot be modified to introduce an exploit. PPC64 already allows RO data to be mapped ROX, as a trade-off between kernel size and strictness of protection. On PPC32, kernel size is even more critical as the amount of memory is usually small. Depending on the number of available IBATs, the last IBAT might overflow the end of text. Only warn if it crosses the end of RO data.
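The sizing logic behind that warning can be sketched in a few lines: BAT sizes are powers of two, so covering _etext may require doubling the last block, and the doubled block is only a problem once it reaches past the end of RO data into RW data. A standalone toy with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x00600000;	/* start of last IBAT */
	unsigned long top = 0x00700000;		/* end of text */
	unsigned long border = 0x00900000;	/* end of RO data */
	unsigned long size = 0x00080000;	/* biggest natural block: 512k */

	if (top - base > size) {
		size <<= 1;			/* next power of two: 1M */
		if (base + size > border)
			puts("warn: RW data would be mapped X");
		else
			puts("RO data mapped ROX: harmless, no warning");
	}
	return 0;
}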
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/6499f8eeb2a36330e5c9fc1cee9a79374875bd54.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/book3s32/mmu.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 39ba53ca5bb5..a9b2cbc74797 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -187,6 +187,7 @@ void mmu_mark_initmem_nx(void) int i; unsigned long base = (unsigned long)_stext - PAGE_OFFSET; unsigned long top = (unsigned long)_etext - PAGE_OFFSET; + unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; unsigned long size; if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) @@ -201,9 +202,10 @@ void mmu_mark_initmem_nx(void) size = block_size(base, top); size = max(size, 128UL << 10); if ((top - base) > size) { - if (strict_kernel_rwx_enabled()) - pr_warn("Kernel _etext not properly aligned\n"); size <<= 1; + if (strict_kernel_rwx_enabled() && base + size > border) + pr_warn("Some RW data is getting mapped X. " + "Adjust CONFIG_DATA_SHIFT to avoid that.\n"); } setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); base += size; -- cgit v1.2.3-59-g8ed1b From 925ac141d106b55acbe112a9272f970631a3c082 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:58 +0000 Subject: powerpc/mm: Allocate static page tables for fixmap Allocate static page tables for the fixmap area. This allows setting mappings through page tables before memblock is ready. That's needed to use early_ioremap() early and to use standard page mappings with fixmap. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/4f4b1412d34de6801b8e925cb88fc69d056ff536.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/fixmap.h | 4 ++++ arch/powerpc/kernel/setup_32.c | 2 +- arch/powerpc/mm/pgtable_32.c | 16 ++++++++++++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 2ef155a3c821..ccbe2e83c950 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -86,6 +86,10 @@ enum fixed_addresses { #define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) +#define FIXMAP_ALIGNED_SIZE (ALIGN(FIXADDR_TOP, PGDIR_SIZE) - \ + ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE)) +#define FIXMAP_PTE_SIZE (FIXMAP_ALIGNED_SIZE / PGDIR_SIZE * PTE_TABLE_SIZE) + #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG #define FIXMAP_PAGE_IO PAGE_KERNEL_NCG diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 15f0a7c84944..d642e42eabb1 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -80,7 +80,7 @@ notrace void __init machine_init(u64 dt_ptr) /* Configure static keys first, now that we're relocated. 
*/ setup_feature_keys(); - early_ioremap_setup(); + early_ioremap_init(); /* Enable early debugging if any specified (see udbg.h) */ udbg_early_init(); diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index f62de06e3d07..9934659cb871 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -29,11 +29,27 @@ #include #include #include +#include #include extern char etext[], _stext[], _sinittext[], _einittext[]; +static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data; + +notrace void __init early_ioremap_init(void) +{ + unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE); + pte_t *ptep = (pte_t *)early_fixmap_pagetable; + pmd_t *pmdp = pmd_ptr_k(addr); + + for (; (s32)(FIXADDR_TOP - addr) > 0; + addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++) + pmd_populate_kernel(&init_mm, pmdp, ptep); + + early_ioremap_setup(); +} + static void __init *early_alloc_pgtable(unsigned long size) { void *ptr = memblock_alloc(size, size); -- cgit v1.2.3-59-g8ed1b From 4e3319c23a66dabfd6c35f4d2633d64d99b68096 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:48:59 +0000 Subject: powerpc/mm: Fix conditions to perform MMU specific management by blocks on PPC32. Setting init mem to NX shall depend on sinittext being mapped by block, not on stext being mapped by block. Setting text and rodata to RO shall depend on stext being mapped by block, not on sinittext being mapped by block. Fixes: 63b2bc619565 ("powerpc/mm/32s: Use BATs for STRICT_KERNEL_RWX") Cc: stable@vger.kernel.org Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/7d565fb8f51b18a3d98445a830b2f6548cb2da2a.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/pgtable_32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 9934659cb871..bd0cb6e3573e 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -185,7 +185,7 @@ void mark_initmem_nx(void) unsigned long numpages = PFN_UP((unsigned long)_einittext) - PFN_DOWN((unsigned long)_sinittext); - if (v_block_mapped((unsigned long)_stext + 1)) + if (v_block_mapped((unsigned long)_sinittext)) mmu_mark_initmem_nx(); else change_page_attr(page, numpages, PAGE_KERNEL); @@ -197,7 +197,7 @@ void mark_rodata_ro(void) struct page *page; unsigned long numpages; - if (v_block_mapped((unsigned long)_sinittext)) { + if (v_block_mapped((unsigned long)_stext + 1)) { mmu_mark_rodata_ro(); ptdump_check_wx(); return; -- cgit v1.2.3-59-g8ed1b From fadaac67c9007cad9fc485e36dcc54460d6d5886 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:00 +0000 Subject: powerpc/mm: PTE_ATOMIC_UPDATES is only for 40x Only 40x still uses PTE_ATOMIC_UPDATES. 40x cannot select CONFIG_PTE_64BIT.
Drop handling of PTE_ATOMIC_UPDATES: - In nohash/64 - In nohash/32 for CONFIG_PTE_64BIT Keep PTE_ATOMIC_UPDATES only for nohash/32 for !CONFIG_PTE_64BIT Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/d6f8e1f46583f1842de24581a68b0496feb15516.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/pgtable.h | 17 ----------------- arch/powerpc/include/asm/nohash/64/pgtable.h | 28 +--------------------------- 2 files changed, 1 insertion(+), 44 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 4315d40906a0..7e908a176e9e 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -262,25 +262,8 @@ static inline unsigned long long pte_update(pte_t *p, unsigned long clr, unsigned long set) { -#ifdef PTE_ATOMIC_UPDATES - unsigned long long old; - unsigned long tmp; - - __asm__ __volatile__("\ -1: lwarx %L0,0,%4\n\ - lwzx %0,0,%3\n\ - andc %1,%L0,%5\n\ - or %1,%1,%6\n" - PPC405_ERR77(0,%3) -" stwcx. %1,0,%4\n\ - bne- 1b" - : "=&r" (old), "=&r" (tmp), "=m" (*p) - : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p) - : "cc" ); -#else /* PTE_ATOMIC_UPDATES */ unsigned long long old = pte_val(*p); *p = __pte((old & ~(unsigned long long)clr) | set); -#endif /* !PTE_ATOMIC_UPDATES */ #ifdef CONFIG_44x if ((old & _PAGE_USER) && (old & _PAGE_EXEC)) diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 9a33b8bd842d..9c703b140d64 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -211,22 +211,9 @@ static inline unsigned long pte_update(struct mm_struct *mm, unsigned long set, int huge) { -#ifdef PTE_ATOMIC_UPDATES - unsigned long old, tmp; - - __asm__ __volatile__( - "1: ldarx %0,0,%3 # pte_update\n\ - andc %1,%0,%4 \n\ - or %1,%1,%6\n\ - stdcx. %1,0,%3 \n\ - bne- 1b" - : "=&r" (old), "=&r" (tmp), "=m" (*ptep) - : "r" (ptep), "r" (clr), "m" (*ptep), "r" (set) - : "cc" ); -#else unsigned long old = pte_val(*ptep); *ptep = __pte((old & ~clr) | set); -#endif + /* huge pages use the old page table lock */ if (!huge) assert_pte_locked(mm, addr); @@ -310,21 +297,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, unsigned long bits = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); -#ifdef PTE_ATOMIC_UPDATES - unsigned long old, tmp; - - __asm__ __volatile__( - "1: ldarx %0,0,%4\n\ - or %0,%3,%0\n\ - stdcx. %0,0,%4\n\ - bne- 1b" - :"=&r" (old), "=&r" (tmp), "=m" (*ptep) - :"r" (bits), "r" (ptep), "m" (*ptep) - :"cc"); -#else unsigned long old = pte_val(*ptep); *ptep = __pte(old | bits); -#endif flush_tlb_page(vma, address); } -- cgit v1.2.3-59-g8ed1b From 2db99aeb63dd6e8808dc054d181c4d0e8645bbe0 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:01 +0000 Subject: powerpc/mm: Refactor pte_update() on nohash/32 When CONFIG_PTE_64BIT is set, pte_update() operates on 'unsigned long long' When CONFIG_PTE_64BIT is not set, pte_update() operates on 'unsigned long' In asm/page.h, we have pte_basic_t which is 'unsigned long long' when CONFIG_PTE_64BIT is set and 'unsigned long' otherwise. Refactor pte_update() using pte_basic_t. 
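A standalone sketch of the pte_basic_t idea (typedefs and values assumed): one typedef absorbs the 32-bit vs 64-bit PTE width so a single pte_update() body serves both configurations, the only subtlety being the cast that widens the clear mask:

#include <stdio.h>

#define MY_PTE_64BIT 1			/* flip to 0 for the narrow variant */

#if MY_PTE_64BIT
typedef unsigned long long pte_basic_t;
#else
typedef unsigned long pte_basic_t;
#endif

static pte_basic_t pte_update(pte_basic_t *p, unsigned long clr, unsigned long set)
{
	pte_basic_t old = *p;

	/* the cast keeps ~clr from truncating the high PTE word */
	*p = (old & ~(pte_basic_t)clr) | set;
	return old;
}

int main(void)
{
	pte_basic_t pte = (1ULL << 33) | 0x7;

	pte_update(&pte, 0x2, 0x8);
	printf("%llx\n", (unsigned long long)pte);	/* 20000000d */
	return 0;
}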
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/590d67994a2847cd9fe088f7d974499e3a18b6ac.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/pgtable.h | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 7e908a176e9e..db17f50d6ac3 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -222,12 +222,9 @@ static inline void pmd_clear(pmd_t *pmdp) * to properly flush the virtually tagged instruction cache of * those implementations. */ -#ifndef CONFIG_PTE_64BIT -static inline unsigned long pte_update(pte_t *p, - unsigned long clr, - unsigned long set) +static inline pte_basic_t pte_update(pte_t *p, unsigned long clr, unsigned long set) { -#ifdef PTE_ATOMIC_UPDATES +#if defined(PTE_ATOMIC_UPDATES) && !defined(CONFIG_PTE_64BIT) unsigned long old, tmp; __asm__ __volatile__("\ @@ -241,8 +238,8 @@ static inline unsigned long pte_update(pte_t *p, : "r" (p), "r" (clr), "r" (set), "m" (*p) : "cc" ); #else /* PTE_ATOMIC_UPDATES */ - unsigned long old = pte_val(*p); - unsigned long new = (old & ~clr) | set; + pte_basic_t old = pte_val(*p); + pte_basic_t new = (old & ~(pte_basic_t)clr) | set; #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES) p->pte = p->pte1 = p->pte2 = p->pte3 = new; @@ -257,21 +254,6 @@ static inline unsigned long pte_update(pte_t *p, #endif return old; } -#else /* CONFIG_PTE_64BIT */ -static inline unsigned long long pte_update(pte_t *p, - unsigned long clr, - unsigned long set) -{ - unsigned long long old = pte_val(*p); - *p = __pte((old & ~(unsigned long long)clr) | set); - -#ifdef CONFIG_44x - if ((old & _PAGE_USER) && (old & _PAGE_EXEC)) - icache_44x_need_flush = 1; -#endif - return old; -} -#endif /* CONFIG_PTE_64BIT */ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep) -- cgit v1.2.3-59-g8ed1b From 1c1bf294882bd12669e39ccd7680c4ce34b7c15c Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:02 +0000 Subject: powerpc/mm: Refactor pte_update() on book3s/32 When CONFIG_PTE_64BIT is set, pte_update() operates on 'unsigned long long' When CONFIG_PTE_64BIT is not set, pte_update() operates on 'unsigned long' In asm/page.h, we have pte_basic_t which is 'unsigned long long' when CONFIG_PTE_64BIT is set and 'unsigned long' otherwise. Refactor pte_update() using pte_basic_t. While we are at it, drop the comment on 44x which is not applicable to book3s version of pte_update(). Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/c78912bc8613fb249c3d80aeb1062796b5c49400.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/book3s/32/pgtable.h | 58 ++++++++++------------------ 1 file changed, 20 insertions(+), 38 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 0d4bccb4b9f2..d2fc324cdf07 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -253,53 +253,35 @@ extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, * and the PTE may be either 32 or 64 bit wide. In the later case, * when using atomic updates, only the low part of the PTE is * accessed atomically. 
- * - * In addition, on 44x, we also maintain a global flag indicating - * that an executable user mapping was modified, which is needed - * to properly flush the virtually tagged instruction cache of - * those implementations. */ -#ifndef CONFIG_PTE_64BIT -static inline unsigned long pte_update(pte_t *p, - unsigned long clr, - unsigned long set) +static inline pte_basic_t pte_update(pte_t *p, unsigned long clr, unsigned long set) { - unsigned long old, tmp; - - __asm__ __volatile__("\ -1: lwarx %0,0,%3\n\ - andc %1,%0,%4\n\ - or %1,%1,%5\n" -" stwcx. %1,0,%3\n\ - bne- 1b" - : "=&r" (old), "=&r" (tmp), "=m" (*p) - : "r" (p), "r" (clr), "r" (set), "m" (*p) - : "cc" ); - - return old; -} -#else /* CONFIG_PTE_64BIT */ -static inline unsigned long long pte_update(pte_t *p, - unsigned long clr, - unsigned long set) -{ - unsigned long long old; + pte_basic_t old; unsigned long tmp; - __asm__ __volatile__("\ -1: lwarx %L0,0,%4\n\ - lwzx %0,0,%3\n\ - andc %1,%L0,%5\n\ - or %1,%1,%6\n" -" stwcx. %1,0,%4\n\ - bne- 1b" + __asm__ __volatile__( +#ifndef CONFIG_PTE_64BIT +"1: lwarx %0, 0, %3\n" +" andc %1, %0, %4\n" +#else +"1: lwarx %L0, 0, %3\n" +" lwz %0, -4(%3)\n" +" andc %1, %L0, %4\n" +#endif +" or %1, %1, %5\n" +" stwcx. %1, 0, %3\n" +" bne- 1b" : "=&r" (old), "=&r" (tmp), "=m" (*p) - : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p) +#ifndef CONFIG_PTE_64BIT + : "r" (p), +#else + : "b" ((unsigned long)(p) + 4), +#endif + "r" (clr), "r" (set), "m" (*p) : "cc" ); return old; } -#endif /* CONFIG_PTE_64BIT */ /* * 2.6 calls this without flushing the TLB entry; this is wrong -- cgit v1.2.3-59-g8ed1b From c7fa77016eb6093df38fdabdb7a89bb9617e7185 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:03 +0000 Subject: powerpc/mm: Standardise __ptep_test_and_clear_young() params between PPC32 and PPC64 On PPC32, __ptep_test_and_clear_young() takes the mm->context.id In preparation of standardising pte_update() params between PPC32 and PPC64, __ptep_test_and_clear_young() need mm instead of mm->context.id Replace context param by mm. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/0a65470e50a14373b7c2291184514aa982462255.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/book3s/32/pgtable.h | 7 ++++--- arch/powerpc/include/asm/nohash/32/pgtable.h | 5 +++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index d2fc324cdf07..25c59511fcab 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -288,18 +288,19 @@ static inline pte_basic_t pte_update(pte_t *p, unsigned long clr, unsigned long * for our hash-based implementation, we fix that up here. 
*/ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep) +static inline int __ptep_test_and_clear_young(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) { unsigned long old; old = pte_update(ptep, _PAGE_ACCESSED, 0); if (old & _PAGE_HASHPTE) { unsigned long ptephys = __pa(ptep) & PAGE_MASK; - flush_hash_pages(context, addr, ptephys, 1); + flush_hash_pages(mm->context.id, addr, ptephys, 1); } return (old & _PAGE_ACCESSED) != 0; } #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ - __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep) + __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep) #define __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index db17f50d6ac3..e963e6880d7c 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -256,14 +256,15 @@ static inline pte_basic_t pte_update(pte_t *p, unsigned long clr, unsigned long } #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep) +static inline int __ptep_test_and_clear_young(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) { unsigned long old; old = pte_update(ptep, _PAGE_ACCESSED, 0); return (old & _PAGE_ACCESSED) != 0; } #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ - __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep) + __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep) #define __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, -- cgit v1.2.3-59-g8ed1b From 06f52524870122fb43b214d27e8f4546da36f8ba Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:04 +0000 Subject: powerpc/mm: Standardise pte_update() prototype between PPC32 and PPC64 PPC64 takes 3 additional parameters compared to PPC32: - mm - address - huge These 3 parameters will be needed in order to perform different action depending on the page size on the 8xx. Make pte_update() prototype identical for PPC32 and PPC64. This allows dropping an #ifdef in huge_ptep_get_and_clear(). 
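What the unified prototype buys can be shown with simplified stand-in types: every caller passes the full context unconditionally and each implementation is free to ignore what it does not need, so common code such as huge_ptep_get_and_clear() needs a single body. A hedged sketch, not the kernel's types:

#include <stdio.h>

struct mm { int id; };

static unsigned long pte_update(struct mm *mm, unsigned long addr,
				unsigned long *ptep, unsigned long clr,
				unsigned long set, int huge)
{
	unsigned long old = *ptep;

	(void)mm; (void)addr; (void)huge;	/* unused on this toy "arch" */
	*ptep = (old & ~clr) | set;
	return old;
}

/* one body for every arch: clear the whole PTE of a huge mapping */
static unsigned long huge_ptep_get_and_clear(struct mm *mm, unsigned long addr,
					     unsigned long *ptep)
{
	return pte_update(mm, addr, ptep, ~0UL, 0, 1);
}

int main(void)
{
	struct mm mm = { 1 };
	unsigned long pte = 0xabc;

	printf("old=%lx new=%lx\n", huge_ptep_get_and_clear(&mm, 0, &pte), pte);
	return 0;
}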
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/38111acf6841047a8addde37c63e92d611ee38c2.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/book3s/32/pgtable.h | 15 ++++++++------- arch/powerpc/include/asm/hugetlb.h | 4 ---- arch/powerpc/include/asm/nohash/32/pgtable.h | 13 +++++++------ 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 25c59511fcab..8a091d125f2d 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -218,7 +218,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); */ #define pte_clear(mm, addr, ptep) \ - do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0) + do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) @@ -254,7 +254,8 @@ extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, * when using atomic updates, only the low part of the PTE is * accessed atomically. */ -static inline pte_basic_t pte_update(pte_t *p, unsigned long clr, unsigned long set) +static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, + unsigned long clr, unsigned long set, int huge) { pte_basic_t old; unsigned long tmp; @@ -292,7 +293,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { unsigned long old; - old = pte_update(ptep, _PAGE_ACCESSED, 0); + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); if (old & _PAGE_HASHPTE) { unsigned long ptephys = __pa(ptep) & PAGE_MASK; flush_hash_pages(mm->context.id, addr, ptephys, 1); @@ -306,14 +307,14 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0)); + return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0)); } #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - pte_update(ptep, _PAGE_RW, 0); + pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); } static inline void __ptep_set_access_flags(struct vm_area_struct *vma, @@ -324,7 +325,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); - pte_update(ptep, 0, set); + pte_update(vma->vm_mm, address, ptep, 0, set, 0); flush_tlb_page(vma, address); } @@ -522,7 +523,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | (pte_val(pte) & ~_PAGE_HASHPTE)); else - pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); + pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0); #elif defined(CONFIG_PTE_64BIT) /* Second case is 32-bit with 64-bit PTE. 
In this case, we diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index bd6504c28c2f..e4276af034e9 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -40,11 +40,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { -#ifdef CONFIG_PPC64 return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); -#else - return __pte(pte_update(ptep, ~0UL, 0)); -#endif } #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index e963e6880d7c..474dd1db065f 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -166,7 +166,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); #ifndef __ASSEMBLY__ #define pte_clear(mm, addr, ptep) \ - do { pte_update(ptep, ~0, 0); } while (0) + do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0) #ifndef pte_mkwrite static inline pte_t pte_mkwrite(pte_t pte) @@ -222,7 +222,8 @@ static inline void pmd_clear(pmd_t *pmdp) * to properly flush the virtually tagged instruction cache of * those implementations. */ -static inline pte_basic_t pte_update(pte_t *p, unsigned long clr, unsigned long set) +static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, + unsigned long clr, unsigned long set, int huge) { #if defined(PTE_ATOMIC_UPDATES) && !defined(CONFIG_PTE_64BIT) unsigned long old, tmp; @@ -260,7 +261,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { unsigned long old; - old = pte_update(ptep, _PAGE_ACCESSED, 0); + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); return (old & _PAGE_ACCESSED) != 0; } #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ @@ -270,7 +271,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - return __pte(pte_update(ptep, ~0, 0)); + return __pte(pte_update(mm, addr, ptep, ~0, 0, 0)); } #define __HAVE_ARCH_PTEP_SET_WRPROTECT @@ -280,7 +281,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0))); unsigned long set = pte_val(pte_wrprotect(__pte(0))); - pte_update(ptep, clr, set); + pte_update(mm, addr, ptep, clr, set, 0); } static inline void __ptep_set_access_flags(struct vm_area_struct *vma, @@ -293,7 +294,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, unsigned long set = pte_val(entry) & pte_val(pte_set); unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr); - pte_update(ptep, clr, set); + pte_update(vma->vm_mm, address, ptep, clr, set, 0); flush_tlb_page(vma, address); } -- cgit v1.2.3-59-g8ed1b From 6ad41bfbc907be0cd414f09fa5382d2133376595 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:05 +0000 Subject: powerpc/mm: Create a dedicated pte_update() for 8xx pte_update() is a bit special for the 8xx. At the time being, that's an #ifdef inside the nohash/32 pte_update(). As we are going to make it even more special in the coming patches, create a dedicated version for pte_update() for 8xx. 
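The special-casing boils down to how many identical entries one logical PTE occupies. A standalone sketch of the replication the dedicated helper performs (sizes assumed: a 16k PAGE_SIZE built from 4k hardware entries):

#include <stdio.h>

#define MY_PAGE_SIZE	16384
#define MY_SZ_4K	4096

static unsigned long pte_update_8xx(unsigned long *entry, unsigned long clr,
				    unsigned long set, int huge)
{
	unsigned long old = entry[0];
	unsigned long new = (old & ~clr) | set;
	int i, num = huge ? 1 : MY_PAGE_SIZE / MY_SZ_4K;

	/* a normal 16k page is 4 identical 4k entries: rewrite them all */
	for (i = 0; i < num; i++)
		entry[i] = new;
	return old;
}

int main(void)
{
	unsigned long pte[4] = { 0x11, 0x11, 0x11, 0x11 };

	pte_update_8xx(pte, 0x1, 0x2, 0);
	printf("%lx %lx %lx %lx\n", pte[0], pte[1], pte[2], pte[3]);
	return 0;
}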
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/a103be0099ac2360f8c44f4a1a63cc03713a1360.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/pgtable.h | 29 ++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 474dd1db065f..5fb3f6798e22 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -221,7 +221,31 @@ static inline void pmd_clear(pmd_t *pmdp) * that an executable user mapping was modified, which is needed * to properly flush the virtually tagged instruction cache of * those implementations. + * + * On the 8xx, the page tables are a bit special. For 16k pages, we have + * 4 identical entries. For other page sizes, we have a single entry in the + * table. */ +#ifdef CONFIG_PPC_8xx +static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, + unsigned long clr, unsigned long set, int huge) +{ + pte_basic_t *entry = &p->pte; + pte_basic_t old = pte_val(*p); + pte_basic_t new = (old & ~(pte_basic_t)clr) | set; + int num, i; + + if (!huge) + num = PAGE_SIZE / SZ_4K; + else + num = 1; + + for (i = 0; i < num; i++, entry++) + *entry = new; + + return old; +} +#else static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, unsigned long clr, unsigned long set, int huge) { @@ -242,11 +266,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p pte_basic_t old = pte_val(*p); pte_basic_t new = (old & ~(pte_basic_t)clr) | set; -#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES) - p->pte = p->pte1 = p->pte2 = p->pte3 = new; -#else *p = __pte(new); -#endif #endif /* !PTE_ATOMIC_UPDATES */ #ifdef CONFIG_44x @@ -255,6 +275,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p #endif return old; } +#endif #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG static inline int __ptep_test_and_clear_young(struct mm_struct *mm, -- cgit v1.2.3-59-g8ed1b From b12c07a4bb064c0a8db7554557b89d40f57c936f Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:06 +0000 Subject: powerpc/mm: Reduce hugepd size for 8M hugepages on 8xx Commit 55c8fc3f4930 ("powerpc/8xx: reintroduce 16K pages with HW assistance") redefined pte_t as a struct of 4 pte_basic_t, because in 16K pages mode there are four identical entries in the page table. But hugepd entries for 8M pages require only one entry of size pte_basic_t. So there is no point in creating a cache for 4-entry page tables. Calculate PTE_T_ORDER using the size of pte_basic_t instead of pte_t. Define specific huge_pte helpers (set_huge_pte_at(), huge_pte_clear(), huge_ptep_set_wrprotect()) to write the pte in a single entry instead of using set_pte_at() which writes 4 identical entries in 16k pages mode. Also make sure that __ptep_set_access_flags() properly handles the huge_pte case. Define set_pte_filter() inline, otherwise GCC doesn't inline it anymore because it is now used twice, and that gives pretty suboptimal code because pte_t is a struct of 4 entries. Those functions are also used for 512k pages, which only require one entry as well, although replicating it four times was harmless since 512k page entries are spread every 128 bytes in the table.
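The PTE_T_ORDER change is plain arithmetic and worth a worked example. __builtin_ffs() (a GCC-style builtin) returns the 1-based index of the lowest set bit, so for powers of two it is log2(n) + 1; with assumed 32-bit sizes:

#include <stdio.h>

int main(void)
{
	int sizeof_pte = 16;	/* pte_t: struct of 4 pte_basic_t in 16k mode */
	int sizeof_basic = 4;	/* one pte_basic_t */
	int sizeof_ptr = 4;

	/* old: ffs(16) - ffs(4) = 5 - 3 = 2, room for 4 identical entries */
	printf("old PTE_T_ORDER = %d\n",
	       __builtin_ffs(sizeof_pte) - __builtin_ffs(sizeof_ptr));
	/* new: ffs(4) - ffs(4) = 0, a hugepd needs only one entry */
	printf("new PTE_T_ORDER = %d\n",
	       __builtin_ffs(sizeof_basic) - __builtin_ffs(sizeof_ptr));
	return 0;
}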
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/43050d1a0c2d6e1541cab9c1126fc80bc7015ebd.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h | 20 ++++++++++++++++++ arch/powerpc/include/asm/nohash/32/pgtable.h | 3 ++- arch/powerpc/mm/hugetlbpage.c | 3 ++- arch/powerpc/mm/pgtable.c | 26 ++++++++++++++++++++---- 4 files changed, 46 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h index a46616937d20..785437323576 100644 --- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h @@ -41,4 +41,24 @@ static inline int check_and_get_huge_psize(int shift) return shift_to_mmu_psize(shift); } +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); + +#define __HAVE_ARCH_HUGE_PTE_CLEAR +static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) +{ + pte_update(mm, addr, ptep, ~0UL, 0, 1); +} + +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0))); + unsigned long set = pte_val(pte_wrprotect(__pte(0))); + + pte_update(mm, addr, ptep, clr, set, 1); +} + #endif /* _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 5fb3f6798e22..ff78bf25f832 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -314,8 +314,9 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0))))); unsigned long set = pte_val(entry) & pte_val(pte_set); unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr); + int huge = psize > mmu_virtual_psize ? 1 : 0; - pte_update(vma->vm_mm, address, ptep, clr, set, 0); + pte_update(vma->vm_mm, address, ptep, clr, set, huge); flush_tlb_page(vma, address); } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index d06efb946c7d..521929a371af 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -30,7 +30,8 @@ bool hugetlb_disabled = false; #define hugepd_none(hpd) (hpd_val(hpd) == 0) -#define PTE_T_ORDER (__builtin_ffs(sizeof(pte_t)) - __builtin_ffs(sizeof(void *))) +#define PTE_T_ORDER (__builtin_ffs(sizeof(pte_basic_t)) - \ + __builtin_ffs(sizeof(void *))) pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) { diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index e3759b69f81b..214a5f4beb6c 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -100,7 +100,7 @@ static pte_t set_pte_filter_hash(pte_t pte) { return pte; } * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so * instead we "filter out" the exec permission for non clean pages. */ -static pte_t set_pte_filter(pte_t pte) +static inline pte_t set_pte_filter(pte_t pte) { struct page *pg; @@ -249,16 +249,34 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, #else /* - * Not used on non book3s64 platforms. But 8xx - * can possibly use tsize derived from hstate. + * Not used on non book3s64 platforms. 
+ * 8xx compares it with mmu_virtual_psize to + * know if it is a huge page or not. */ - psize = 0; + psize = MMU_PAGE_COUNT; #endif __ptep_set_access_flags(vma, ptep, pte, addr, psize); } return changed; #endif } + +#if defined(CONFIG_PPC_8xx) +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) +{ + /* + * Make sure hardware valid bit is not set. We don't do + * tlb flush for this update. + */ + VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep)); + + pte = pte_mkpte(pte); + + pte = set_pte_filter(pte); + + ptep->pte = pte_val(pte); +} +#endif #endif /* CONFIG_HUGETLB_PAGE */ #ifdef CONFIG_DEBUG_VM -- cgit v1.2.3-59-g8ed1b From d3efcd38c0b99162d889e36a30425345a18edb33 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:07 +0000 Subject: powerpc/8xx: Drop CONFIG_8xx_COPYBACK option CONFIG_8xx_COPYBACK was there to help disable copyback cache mode when debugging hardware. But nobody will design new boards with 8xx now. All 8xx platforms select it, so make it the default and remove the option. Also remove the Mx_RESETVAL values, which are pretty useless and hide the real values when reading the code. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/bcc968cda075516eb76e2f25e09821f582c566b4.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/configs/adder875_defconfig | 1 - arch/powerpc/configs/ep88xc_defconfig | 1 - arch/powerpc/configs/mpc866_ads_defconfig | 1 - arch/powerpc/configs/mpc885_ads_defconfig | 1 - arch/powerpc/configs/tqm8xx_defconfig | 1 - arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 2 -- arch/powerpc/kernel/head_8xx.S | 15 +-------------- arch/powerpc/platforms/8xx/Kconfig | 9 --------- 8 files changed, 1 insertion(+), 30 deletions(-) diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig index f55e23cb176c..5326bc739279 100644 --- a/arch/powerpc/configs/adder875_defconfig +++ b/arch/powerpc/configs/adder875_defconfig @@ -10,7 +10,6 @@ CONFIG_EXPERT=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y CONFIG_PPC_ADDER875=y -CONFIG_8xx_COPYBACK=y CONFIG_GEN_RTC=y CONFIG_HZ_1000=y # CONFIG_SECCOMP is not set diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig index 0e2e5e81a359..f5c3e72da719 100644 --- a/arch/powerpc/configs/ep88xc_defconfig +++ b/arch/powerpc/configs/ep88xc_defconfig @@ -12,7 +12,6 @@ CONFIG_EXPERT=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y CONFIG_PPC_EP88XC=y -CONFIG_8xx_COPYBACK=y CONFIG_GEN_RTC=y CONFIG_HZ_100=y # CONFIG_SECCOMP is not set diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig index 5320735395e7..5c56d36cdfc5 100644 --- a/arch/powerpc/configs/mpc866_ads_defconfig +++ b/arch/powerpc/configs/mpc866_ads_defconfig @@ -12,7 +12,6 @@ CONFIG_EXPERT=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y CONFIG_MPC86XADS=y -CONFIG_8xx_COPYBACK=y CONFIG_GEN_RTC=y CONFIG_HZ_1000=y CONFIG_MATH_EMULATION=y diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig index 82a008c04eae..949ff9ccda5e 100644 --- a/arch/powerpc/configs/mpc885_ads_defconfig +++ b/arch/powerpc/configs/mpc885_ads_defconfig @@ -11,7 +11,6 @@ CONFIG_EXPERT=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y -CONFIG_8xx_COPYBACK=y CONFIG_GEN_RTC=y CONFIG_HZ_100=y # CONFIG_SECCOMP is not set diff --git
a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig index eda8bfb2d0a3..77857d513022 100644 --- a/arch/powerpc/configs/tqm8xx_defconfig +++ b/arch/powerpc/configs/tqm8xx_defconfig @@ -15,7 +15,6 @@ CONFIG_MODULE_SRCVERSION_ALL=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y CONFIG_TQM8XX=y -CONFIG_8xx_COPYBACK=y # CONFIG_8xx_CPU15 is not set CONFIG_GEN_RTC=y CONFIG_HZ_100=y diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index 76af5b0cb16e..26b7cee34dfe 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -19,7 +19,6 @@ #define MI_RSV4I 0x08000000 /* Reserve 4 TLB entries */ #define MI_PPCS 0x02000000 /* Use MI_RPN prob/priv state */ #define MI_IDXMASK 0x00001f00 /* TLB index to be loaded */ -#define MI_RESETVAL 0x00000000 /* Value of register at reset */ /* These are the Ks and Kp from the PowerPC books. For proper operation, * Ks = 0, Kp = 1. @@ -95,7 +94,6 @@ #define MD_TWAM 0x04000000 /* Use 4K page hardware assist */ #define MD_PPCS 0x02000000 /* Use MI_RPN prob/priv state */ #define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */ -#define MD_RESETVAL 0x04000000 /* Value of register at reset */ #define SPRN_M_CASID 793 /* Address space ID (context) to match */ #define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */ diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 073a651787df..905205c79a25 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -779,10 +779,7 @@ start_here: initial_mmu: li r8, 0 mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */ - lis r10, MD_RESETVAL@h -#ifndef CONFIG_8xx_COPYBACK - oris r10, r10, MD_WTDEF@h -#endif + lis r10, MD_TWAM@h mtspr SPRN_MD_CTR, r10 /* remove PINNED DTLB entries */ tlbia /* Invalidate all TLB entries */ @@ -857,17 +854,7 @@ initial_mmu: mtspr SPRN_DC_CST, r8 lis r8, IDC_ENABLE@h mtspr SPRN_IC_CST, r8 -#ifdef CONFIG_8xx_COPYBACK - mtspr SPRN_DC_CST, r8 -#else - /* For a debug option, I left this here to easily enable - * the write through cache mode - */ - lis r8, DC_SFWT@h mtspr SPRN_DC_CST, r8 - lis r8, IDC_ENABLE@h - mtspr SPRN_DC_CST, r8 -#endif /* Disable debug mode entry on breakpoints */ mfspr r8, SPRN_DER #ifdef CONFIG_PERF_EVENTS diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index e0fe670f06f6..b37de62d7e7f 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -98,15 +98,6 @@ menu "MPC8xx CPM Options" # 8xx specific questions. comment "Generic MPC8xx Options" -config 8xx_COPYBACK - bool "Copy-Back Data Cache (else Writethrough)" - help - Saying Y here will cause the cache on an MPC8xx processor to be used - in Copy-Back mode. If you say N here, it is used in Writethrough - mode. - - If in doubt, say Y here. - config 8xx_GPIO bool "GPIO API Support" select GPIOLIB -- cgit v1.2.3-59-g8ed1b From a891c43b97d315ee5f9fe8e797d3d48fc351e053 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:08 +0000 Subject: powerpc/8xx: Prepare handlers for _PAGE_HUGE for 512k pages. Prepare ITLB handler to handle _PAGE_HUGE when CONFIG_HUGETLBFS is enabled. This means that the L1 entry has to be kept in r11 until L2 entry is read, in order to insert _PAGE_HUGE into it. Also move pgd_offset helpers before pte_update() as they will be needed there in next patch. 
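The pgd_offset helpers being moved are just index arithmetic on the top address bits. A tiny sketch with an assumed PGDIR_SHIFT of 22 (4M covered per PGD entry, as on a 2-level 32-bit layout):

#include <stdio.h>

#define MY_PGDIR_SHIFT 22

static unsigned long pgd_index(unsigned long address)
{
	return address >> MY_PGDIR_SHIFT;
}

int main(void)
{
	unsigned long addr = 0xc0123456UL;

	/* pgd[pgd_index(addr)] covers addr's whole 4M slice */
	printf("pgd index for %lx: %lu\n", addr, pgd_index(addr));
	return 0;
}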
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/21fd1de8fba781bededa9474a5a9374aefb1f849.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/pgtable.h | 13 ++++++------- arch/powerpc/kernel/head_8xx.S | 15 +++++++++------ 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index ff78bf25f832..9a287a95acad 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -206,6 +206,12 @@ static inline void pmd_clear(pmd_t *pmdp) } +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +/* to find an entry in a page-table-directory */ +#define pgd_index(address) ((address) >> PGDIR_SHIFT) +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) /* * PTE updates. This function is called whenever an existing @@ -348,13 +354,6 @@ static inline int pte_young(pte_t pte) pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT)) #endif -/* to find an entry in a kernel page-table-directory */ -#define pgd_offset_k(address) pgd_offset(&init_mm, address) - -/* to find an entry in a page-table-directory */ -#define pgd_index(address) ((address) >> PGDIR_SHIFT) -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) - /* Find an entry in the third-level page table.. */ #define pte_index(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 905205c79a25..adad8baadcf5 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -196,7 +196,7 @@ SystemCall: InstructionTLBMiss: mtspr SPRN_SPRG_SCRATCH0, r10 -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) +#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS) mtspr SPRN_SPRG_SCRATCH1, r11 #endif @@ -235,16 +235,19 @@ InstructionTLBMiss: rlwinm r10, r10, 0, 20, 31 oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha 3: + mtcr r11 #endif +#ifdef CONFIG_HUGETLBFS + lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ + mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ + mtspr SPRN_MD_TWC, r11 +#else lwz r10, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ mtspr SPRN_MI_TWC, r10 /* Set segment attributes */ - mtspr SPRN_MD_TWC, r10 +#endif mfspr r10, SPRN_MD_TWC lwz r10, 0(r10) /* Get the pte */ -#ifdef ITLB_MISS_KERNEL - mtcr r11 -#endif #ifdef CONFIG_SWAP rlwinm r11, r10, 32-5, _PAGE_PRESENT and r11, r11, r10 @@ -263,7 +266,7 @@ InstructionTLBMiss: /* Restore registers */ 0: mfspr r10, SPRN_SPRG_SCRATCH0 -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) +#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS) mfspr r11, SPRN_SPRG_SCRATCH1 #endif rfi -- cgit v1.2.3-59-g8ed1b From b250c8c08c79d1eb5354c7eaa84b7505f5f2d921 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:09 +0000 Subject: powerpc/8xx: Manage 512k huge pages as standard pages. At the time being, 512k huge pages are handled through hugepd page tables. The PMD entry is flagged as a hugepd pointer and it means that only 512k hugepages can be managed in that 4M block. However, the hugepd table has the same size as a normal page table, and 512k entries can therefore be nested with normal pages. 
On the 8xx, TLB loading is performed by software and although the page tables are organised to match the L1 and L2 levels defined by the HW, all TLB entries have both L1 and L2 independent entries. It means that even if two TLB entries are associated with the same PMD entry, they can be loaded with different values in the L1 part. The L1 entry contains the page size (PS field): - 00 for 4k and 16k pages - 01 for 512k pages - 11 for 8M pages By adding a flag for hugepages in the PTE (_PAGE_HUGE) and copying it into the lower bit of PS, we can then manage 512k pages with normal page tables: - PMD entry has PS=11 for 8M pages - PMD entry has PS=00 for other pages. As a PMD entry covers a 4M area, a PMD will either point to a hugepd table having a single entry to an 8M page, or the PMD will point to a standard page table which will have either entries to 4k or 16k or 512k pages. For 512k pages, as the L1 entry will not know it is a 512k page before the PTE is read, there will be 128 entries in the PTE table, as if they were 4k pages. But when loading the TLB, it will be flagged as a 512k page. Note that we can't use pmd_ptr() in asm/nohash/32/pgtable.h because it is not defined yet. In the ITLB miss handler, we keep the possibility to opt it out: when kernel text is pinned and no user hugepages are used, we can save several instructions by not using r11. In the DTLB miss handler, that's just one instruction, so it's not worth bothering with it. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/002819e8e166bf81d24b24782d98de7c40905d8f.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/pgtable.h | 10 +++++++--- arch/powerpc/include/asm/nohash/32/pte-8xx.h | 4 +++- arch/powerpc/include/asm/nohash/pgtable.h | 2 +- arch/powerpc/kernel/head_8xx.S | 12 +++++------- arch/powerpc/mm/hugetlbpage.c | 22 +++++++++++++++++++--- arch/powerpc/mm/pgtable.c | 10 +++++++++- 6 files changed, 44 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 9a287a95acad..717f995d21b8 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -229,8 +229,9 @@ static inline void pmd_clear(pmd_t *pmdp) * those implementations. * * On the 8xx, the page tables are a bit special. For 16k pages, we have - 4 identical entries. For other page sizes, we have a single entry in the - table. + 4 identical entries. For 512k pages, we have 128 entries as if it was + 4k pages, but they are flagged as 512k pages for the hardware. + For other page sizes, we have a single entry in the table. 
*/ #ifdef CONFIG_PPC_8xx static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, @@ -240,13 +241,16 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p pte_basic_t old = pte_val(*p); pte_basic_t new = (old & ~(pte_basic_t)clr) | set; int num, i; + pmd_t *pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr); if (!huge) num = PAGE_SIZE / SZ_4K; + else if ((pmd_val(*pmd) & _PMD_PAGE_MASK) != _PMD_PAGE_8M) + num = SZ_512K / SZ_4K; else num = 1; - for (i = 0; i < num; i++, entry++) + for (i = 0; i < num; i++, entry++, new += SZ_4K) *entry = new; return old; diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h index c9e4b2d90f65..66f403a7da44 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h @@ -46,6 +46,8 @@ #define _PAGE_NA 0x0200 /* Supervisor NA, User no access */ #define _PAGE_RO 0x0600 /* Supervisor RO, User no access */ +#define _PAGE_HUGE 0x0800 /* Copied to L1 PS bit 29 */ + /* cache related flags non existing on 8xx */ #define _PAGE_COHERENT 0 #define _PAGE_WRITETHRU 0 @@ -128,7 +130,7 @@ static inline pte_t pte_mkuser(pte_t pte) static inline pte_t pte_mkhuge(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_SPS); + return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE); } #define pte_mkhuge pte_mkhuge diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 7fed9dc0f147..f27c967d9269 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -267,7 +267,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, static inline int hugepd_ok(hugepd_t hpd) { #ifdef CONFIG_PPC_8xx - return ((hpd_val(hpd) & 0x4) != 0); + return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M); #else /* We clear the top bit to indicate hugepd */ return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0); diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index adad8baadcf5..423465b10c82 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -239,7 +239,6 @@ InstructionTLBMiss: #endif #ifdef CONFIG_HUGETLBFS lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ - mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ mtspr SPRN_MD_TWC, r11 #else lwz r10, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ @@ -248,6 +247,10 @@ InstructionTLBMiss: #endif mfspr r10, SPRN_MD_TWC lwz r10, 0(r10) /* Get the pte */ +#ifdef CONFIG_HUGETLBFS + rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K + mtspr SPRN_MI_TWC, r11 +#endif #ifdef CONFIG_SWAP rlwinm r11, r10, 32-5, _PAGE_PRESENT and r11, r11, r10 @@ -353,6 +356,7 @@ DataStoreTLBMiss: * above. */ rlwimi r11, r10, 0, _PAGE_GUARDED + rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K mtspr SPRN_MD_TWC, r11 /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. @@ -584,7 +588,6 @@ FixupDAR:/* Entry point for dcbx workaround. */ mfspr r11, SPRN_MD_TWC lwz r11, 0(r11) /* Get the pte */ bt 28,200f /* bit 28 = Large page (8M) */ - bt 29,202f /* bit 29 = Large page (8M or 512K) */ /* concat physical page address(r11) and page offset(r10) */ rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31 201: lwz r11,0(r11) @@ -611,11 +614,6 @@ FixupDAR:/* Entry point for dcbx workaround. 
*/ rlwimi r11, r10, 0, 32 - PAGE_SHIFT_8M, 31 b 201b -202: - /* concat physical page address(r11) and page offset(r10) */ - rlwimi r11, r10, 0, 32 - PAGE_SHIFT_512K, 31 - b 201b - 144: mfspr r10, SPRN_DSISR rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */ mtspr SPRN_DSISR, r10 diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 521929a371af..38bad839e608 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -189,6 +189,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz if (!hpdp) return NULL; + if (IS_ENABLED(CONFIG_PPC_8xx) && sz == SZ_512K) + return pte_alloc_map(mm, (pmd_t *)hpdp, addr); + BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp)); if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, @@ -331,13 +334,20 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif if (shift >= pdshift) hugepd_free(tlb, hugepte); - else if (IS_ENABLED(CONFIG_PPC_8xx)) - pgtable_free_tlb(tlb, hugepte, 0); else pgtable_free_tlb(tlb, hugepte, get_hugepd_cache_index(pdshift - shift)); } +static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr) +{ + pgtable_t token = pmd_pgtable(*pmd); + + pmd_clear(pmd); + pte_free_tlb(tlb, token, addr); + mm_dec_nr_ptes(tlb->mm); +} + static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) @@ -353,11 +363,17 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, pmd = pmd_offset(pud, addr); next = pmd_addr_end(addr, end); if (!is_hugepd(__hugepd(pmd_val(*pmd)))) { + if (pmd_none_or_clear_bad(pmd)) + continue; + /* * if it is not hugepd pointer, we should already find * it cleared. */ - WARN_ON(!pmd_none_or_clear_bad(pmd)); + WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx)); + + hugetlb_free_pte_range(tlb, pmd, addr); + continue; } /* diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 214a5f4beb6c..60c4b8ff046c 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -264,6 +264,12 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, #if defined(CONFIG_PPC_8xx) void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { + pmd_t *pmd = pmd_ptr(mm, addr); + pte_basic_t val; + pte_basic_t *entry = &ptep->pte; + int num = is_hugepd(*((hugepd_t *)pmd)) ? 1 : SZ_512K / SZ_4K; + int i; + /* * Make sure hardware valid bit is not set. We don't do * tlb flush for this update. @@ -274,7 +280,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_ pte = set_pte_filter(pte); - ptep->pte = pte_val(pte); + val = pte_val(pte); + for (i = 0; i < num; i++, entry++, val += SZ_4K) + *entry = val; } #endif #endif /* CONFIG_HUGETLB_PAGE */ -- cgit v1.2.3-59-g8ed1b From d4870b89acd7c362ded08f9295e8d143cf7e0024 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:10 +0000 Subject: powerpc/8xx: Only 8M pages are hugepte pages now 512k pages are now standard pages, so only 8M pages are hugepte. No more handling of normal page tables through hugepd allocation and freeing, and hugepte helpers can also be simplified. 
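As a sketch of the remaining 8M case (illustration only, values hypothetical; the two-slot layout follows from one 8M page spanning two 4M PGD entries, as a later patch in this series states explicitly):

/* Illustration only: one 8M hugepd shared by two consecutive PGD slots. */
#include <stdio.h>

#define PGDIR_SHIFT 22	/* each PGD entry covers 4M with 4k pages */

int main(void)
{
	unsigned long pgd[1024] = { 0 };
	unsigned long hugepd = 0x2000UL | 0x1UL;	/* hypothetical table | flags */
	unsigned long addr = 0xc0800000UL;		/* 8M-aligned address */
	unsigned long i = addr >> PGDIR_SHIFT;

	pgd[i] = hugepd;
	pgd[i + 1] = hugepd;	/* 8M = 2 x 4M, hence two identical entries */
	printf("PGD slots %lu and %lu share hugepd 0x%lx\n", i, i + 1, pgd[i]);
	return 0;
}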
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/2c6135d57fb76eebf70673fbac3dc9e740767879.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h | 7 +++---- arch/powerpc/mm/hugetlbpage.c | 16 +++------------- 2 files changed, 6 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h index 785437323576..1c7d4693a78e 100644 --- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h @@ -13,13 +13,13 @@ static inline pte_t *hugepd_page(hugepd_t hpd) static inline unsigned int hugepd_shift(hugepd_t hpd) { - return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17; + return PAGE_SHIFT_8M; } static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, unsigned int pdshift) { - unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT; + unsigned long idx = (addr & (SZ_4M - 1)) >> PAGE_SHIFT; return hugepd_page(hpd) + idx; } @@ -32,8 +32,7 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma, static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift) { - *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | - (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : _PMD_PAGE_512K)); + *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M); } static inline int check_and_get_huge_psize(int shift) diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 38bad839e608..cfacd364c7aa 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -54,24 +54,17 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, if (pshift >= pdshift) { cachep = PGT_CACHE(PTE_T_ORDER); num_hugepd = 1 << (pshift - pdshift); - new = NULL; - } else if (IS_ENABLED(CONFIG_PPC_8xx)) { - cachep = NULL; - num_hugepd = 1; - new = pte_alloc_one(mm); } else { cachep = PGT_CACHE(pdshift - pshift); num_hugepd = 1; - new = NULL; } - if (!cachep && !new) { + if (!cachep) { WARN_ONCE(1, "No page table cache created for hugetlb tables"); return -ENOMEM; } - if (cachep) - new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL)); + new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL)); BUG_ON(pshift > HUGEPD_SHIFT_MASK); BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); @@ -102,10 +95,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, if (i < num_hugepd) { for (i = i - 1 ; i >= 0; i--, hpdp--) *hpdp = __hugepd(0); - if (cachep) - kmem_cache_free(cachep, new); - else - pte_free(mm, new); + kmem_cache_free(cachep, new); } else { kmemleak_ignore(new); } -- cgit v1.2.3-59-g8ed1b From 555904d07eef3a2e5fc458419edf6174362c4ddd Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:11 +0000 Subject: powerpc/8xx: MM_SLICE is not needed anymore As the 8xx now manages 512k pages in standard page tables, it doesn't need CONFIG_PPC_MM_SLICES anymore. Don't select it anymore and remove all related code. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/98e8ccd424476ea73cced2b89ba38eb2ed8144fb.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 64 ---------------------------- arch/powerpc/include/asm/nohash/32/slice.h | 20 --------- arch/powerpc/include/asm/slice.h | 2 - arch/powerpc/platforms/Kconfig.cputype | 1 - 4 files changed, 87 deletions(-) delete mode 100644 arch/powerpc/include/asm/nohash/32/slice.h diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index 26b7cee34dfe..a092e6434bda 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -176,12 +176,6 @@ */ #define SPRN_M_TW 799 -#ifdef CONFIG_PPC_MM_SLICES -#include -#define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1)) -#define LOW_SLICE_ARRAY_SZ SLICE_ARRAY_SIZE -#endif - #if defined(CONFIG_PPC_4K_PAGES) #define mmu_virtual_psize MMU_PAGE_4K #elif defined(CONFIG_PPC_16K_PAGES) @@ -199,71 +193,13 @@ #include -struct slice_mask { - u64 low_slices; - DECLARE_BITMAP(high_slices, 0); -}; - typedef struct { unsigned int id; unsigned int active; unsigned long vdso_base; -#ifdef CONFIG_PPC_MM_SLICES - u16 user_psize; /* page size index */ - unsigned char low_slices_psize[SLICE_ARRAY_SIZE]; - unsigned char high_slices_psize[0]; - unsigned long slb_addr_limit; - struct slice_mask mask_base_psize; /* 4k or 16k */ - struct slice_mask mask_512k; - struct slice_mask mask_8m; -#endif void *pte_frag; } mm_context_t; -#ifdef CONFIG_PPC_MM_SLICES -static inline u16 mm_ctx_user_psize(mm_context_t *ctx) -{ - return ctx->user_psize; -} - -static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize) -{ - ctx->user_psize = user_psize; -} - -static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx) -{ - return ctx->low_slices_psize; -} - -static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx) -{ - return ctx->high_slices_psize; -} - -static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx) -{ - return ctx->slb_addr_limit; -} - -static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit) -{ - ctx->slb_addr_limit = limit; -} - -static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize) -{ - if (psize == MMU_PAGE_512K) - return &ctx->mask_512k; - if (psize == MMU_PAGE_8M) - return &ctx->mask_8m; - - BUG_ON(psize != mmu_virtual_psize); - - return &ctx->mask_base_psize; -} -#endif /* CONFIG_PPC_MM_SLICE */ - #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000) #define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE)) diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h deleted file mode 100644 index 39eb0154ae2d..000000000000 --- a/arch/powerpc/include/asm/nohash/32/slice.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H -#define _ASM_POWERPC_NOHASH_32_SLICE_H - -#ifdef CONFIG_PPC_MM_SLICES - -#define SLICE_LOW_SHIFT 26 /* 64 slices */ -#define SLICE_LOW_TOP (0x100000000ull) -#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) -#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) - -#define SLICE_HIGH_SHIFT 0 -#define SLICE_NUM_HIGH 0ul -#define GET_HIGH_SLICE_INDEX(addr) (addr & 0) - -#define SLB_ADDR_LIMIT_DEFAULT DEFAULT_MAP_WINDOW - -#endif /* CONFIG_PPC_MM_SLICES */ - -#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */ diff --git 
a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h index c6f466f4c241..0bdd9c62eca0 100644 --- a/arch/powerpc/include/asm/slice.h +++ b/arch/powerpc/include/asm/slice.h @@ -4,8 +4,6 @@ #ifdef CONFIG_PPC_BOOK3S_64 #include -#elif defined(CONFIG_PPC_MMU_NOHASH_32) -#include #endif #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 0c3c1902135c..b0587b833517 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -55,7 +55,6 @@ config PPC_8xx select SYS_SUPPORTS_HUGETLBFS select PPC_HAVE_KUEP select PPC_HAVE_KUAP - select PPC_MM_SLICES if HUGETLB_PAGE select HAVE_ARCH_VMAP_STACK config 40x -- cgit v1.2.3-59-g8ed1b From 5d4656696c30cef56b2ab506b203533c818af04d Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:12 +0000 Subject: powerpc/8xx: Move PPC_PIN_TLB options into 8xx Kconfig PPC_PIN_TLB options are dedicated to the 8xx, move them into the 8xx Kconfig. While we are at it, add some text to explain what they do. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1ece39fac6312e1d14e6a67b3f9d9f9f91990a7b.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 20 ------------------- arch/powerpc/platforms/8xx/Kconfig | 41 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 30e2111ca15d..1d4ef4f27dec 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -1227,26 +1227,6 @@ config TASK_SIZE hex "Size of user task space" if TASK_SIZE_BOOL default "0x80000000" if PPC_8xx default "0xc0000000" - -config PIN_TLB - bool "Pinned Kernel TLBs (860 ONLY)" - depends on ADVANCED_OPTIONS && PPC_8xx && \ - !DEBUG_PAGEALLOC && !STRICT_KERNEL_RWX - -config PIN_TLB_DATA - bool "Pinned TLB for DATA" - depends on PIN_TLB - default y - -config PIN_TLB_IMMR - bool "Pinned TLB for IMMR" - depends on PIN_TLB || PPC_EARLY_DEBUG_CPM - default y - -config PIN_TLB_TEXT - bool "Pinned TLB for TEXT" - depends on PIN_TLB - default y endmenu if PPC64 diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index b37de62d7e7f..0d036cd868ef 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -162,4 +162,45 @@ config UCODE_PATCH default y depends on !NO_UCODE_PATCH +menu "8xx advanced setup" + depends on PPC_8xx + +config PIN_TLB + bool "Pinned Kernel TLBs" + depends on ADVANCED_OPTIONS && !DEBUG_PAGEALLOC && !STRICT_KERNEL_RWX + help + On the 8xx, we have 32 instruction TLBs and 32 data TLBs. In each + table 4 TLBs can be pinned. + + It reduces the amount of usable TLBs to 28 (ie by 12%). That's the + reason why we make it selectable. + + This option does nothing, it just activate the selection of what + to pin. + +config PIN_TLB_DATA + bool "Pinned TLB for DATA" + depends on PIN_TLB + default y + help + This pins the first 32 Mbytes of memory with 8M pages. + +config PIN_TLB_IMMR + bool "Pinned TLB for IMMR" + depends on PIN_TLB || PPC_EARLY_DEBUG_CPM + default y + help + This pins the IMMR area with a 512kbytes page. In case + CONFIG_PIN_TLB_DATA is also selected, it will reduce + CONFIG_PIN_TLB_DATA to 24 Mbytes. + +config PIN_TLB_TEXT + bool "Pinned TLB for TEXT" + depends on PIN_TLB + default y + help + This pins kernel text with 8M pages. 
+ +endmenu + endmenu -- cgit v1.2.3-59-g8ed1b From f76c8f6d257cefda60221c83af7f97d9f74cb3ce Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:13 +0000 Subject: powerpc/8xx: Add function to set pinned TLBs Pinned TLBs cannot be modified when the MMU is enabled. Create a function to rewrite the pinned TLB entries with the MMU off. To set pinned TLBs, we have to turn off the MMU, disable pinning, do a TLB flush (either with tlbie or tlbia), then reprogram the TLB entries, enable pinning and turn the MMU back on. If using tlbie, it clears entries in both the instruction and data TLBs regardless of whether pinning is disabled or not. If using tlbia, it clears all entries of the TLB which has pinning disabled. To make it easy, just clear all entries in both TLBs, and reprogram them. The function takes two arguments, the top of the memory to consider and whether data is RO under _sinittext. When DEBUG_PAGEALLOC is set, the top is the end of kernel rodata. Otherwise, that's the top of physical RAM. Everything below _sinittext is set RX, over _sinittext that's RW. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/c17806014bb1c06513ad1e1d510faea31984b177.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 2 + arch/powerpc/kernel/head_8xx.S | 103 +++++++++++++++++++++++++++ 2 files changed, 105 insertions(+) diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index a092e6434bda..4d3ef3841b00 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -193,6 +193,8 @@ #include +void mmu_pin_tlb(unsigned long top, bool readonly); + typedef struct { unsigned int id; unsigned int active; diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 423465b10c82..c9e3d54e6a6f 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -16,6 +16,7 @@ #include #include +#include #include #include #include @@ -866,6 +867,108 @@ initial_mmu: mtspr SPRN_DER, r8 blr +#ifdef CONFIG_PIN_TLB +_GLOBAL(mmu_pin_tlb) + lis r9, (1f - PAGE_OFFSET)@h + ori r9, r9, (1f - PAGE_OFFSET)@l + mfmsr r10 + mflr r11 + li r12, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI) + rlwinm r0, r10, 0, ~MSR_RI + rlwinm r0, r0, 0, ~MSR_EE + mtmsr r0 + isync + .align 4 + mtspr SPRN_SRR0, r9 + mtspr SPRN_SRR1, r12 + rfi +1: + li r5, 0 + lis r6, MD_TWAM@h + mtspr SPRN_MI_CTR, r5 + mtspr SPRN_MD_CTR, r6 + tlbia + +#ifdef CONFIG_PIN_TLB_TEXT + LOAD_REG_IMMEDIATE(r5, 28 << 8) + LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET) + LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG) + LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT) + LOAD_REG_ADDR(r9, _sinittext) + li r0, 4 + mtctr r0 + +2: ori r0, r6, MI_EVALID + mtspr SPRN_MI_CTR, r5 + mtspr SPRN_MI_EPN, r0 + mtspr SPRN_MI_TWC, r7 + mtspr SPRN_MI_RPN, r8 + addi r5, r5, 0x100 + addis r6, r6, SZ_8M@h + addis r8, r8, SZ_8M@h + cmplw r6, r9 + bdnzt lt, 2b + lis r0, MI_RSV4I@h + mtspr SPRN_MI_CTR, r0 +#endif + LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM) +#ifdef CONFIG_PIN_TLB_DATA + LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET) + LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG) +#ifdef CONFIG_PIN_TLB_IMMR + li r0, 3 +#else + li r0, 4 +#endif + mtctr r0 + cmpwi r4, 0 + beq 4f + LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT) + LOAD_REG_ADDR(r9, _sinittext) + +2: ori r0, r6, MD_EVALID + mtspr SPRN_MD_CTR, r5 + mtspr SPRN_MD_EPN, r0 + mtspr 
SPRN_MD_TWC, r7 + mtspr SPRN_MD_RPN, r8 + addi r5, r5, 0x100 + addis r6, r6, SZ_8M@h + addis r8, r8, SZ_8M@h + cmplw r6, r9 + bdnzt lt, 2b + +4: LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT) +2: ori r0, r6, MD_EVALID + mtspr SPRN_MD_CTR, r5 + mtspr SPRN_MD_EPN, r0 + mtspr SPRN_MD_TWC, r7 + mtspr SPRN_MD_RPN, r8 + addi r5, r5, 0x100 + addis r6, r6, SZ_8M@h + addis r8, r8, SZ_8M@h + cmplw r6, r3 + bdnzt lt, 2b +#endif +#ifdef CONFIG_PIN_TLB_IMMR + LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID) + LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED) + mfspr r8, SPRN_IMMR + rlwinm r8, r8, 0, 0xfff80000 + ori r8, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \ + _PAGE_NO_CACHE | _PAGE_PRESENT + mtspr SPRN_MD_CTR, r5 + mtspr SPRN_MD_EPN, r0 + mtspr SPRN_MD_TWC, r7 + mtspr SPRN_MD_RPN, r8 +#endif +#if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA) + lis r0, (MD_RSV4I | MD_TWAM)@h + mtspr SPRN_MI_CTR, r0 +#endif + mtspr SPRN_SRR1, r10 + mtspr SPRN_SRR0, r11 + rfi +#endif /* CONFIG_PIN_TLB */ /* * We put a few things here that have to be page-aligned. -- cgit v1.2.3-59-g8ed1b From 136a9a0f74d2e0d9de5515190fe80344b86b45cf Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:14 +0000 Subject: powerpc/8xx: Don't set IMMR map anymore at boot Only early debug requires IMMR to be mapped early. No need to set it up and pin it in assembly. Map it through page tables at udbg init when necessary. If CONFIG_PIN_TLB_IMMR is selected, pin it once we don't need the 32 Mb pinned RAM anymore. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/13c1e8539fdf363d3146f4884e5c3c76c6c308b5.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/head_8xx.S | 39 +++++++++++++++++--------------------- arch/powerpc/mm/mmu_decl.h | 4 ++++ arch/powerpc/mm/nohash/8xx.c | 15 +++++++++++---- arch/powerpc/platforms/8xx/Kconfig | 2 +- arch/powerpc/sysdev/cpm_common.c | 2 ++ 5 files changed, 35 insertions(+), 27 deletions(-) diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index c9e3d54e6a6f..d607f4b53e0f 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -749,6 +749,23 @@ start_here: rfi /* Load up the kernel context */ 2: +#ifdef CONFIG_PIN_TLB_IMMR + lis r0, MD_TWAM@h + oris r0, r0, 0x1f00 + mtspr SPRN_MD_CTR, r0 + LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID) + tlbie r0 + mtspr SPRN_MD_EPN, r0 + LOAD_REG_IMMEDIATE(r0, MD_SVALID | MD_PS512K | MD_GUARDED) + mtspr SPRN_MD_TWC, r0 + mfspr r0, SPRN_IMMR + rlwinm r0, r0, 0, 0xfff80000 + ori r0, r0, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \ + _PAGE_NO_CACHE | _PAGE_PRESENT + mtspr SPRN_MD_RPN, r0 + lis r0, (MD_TWAM | MD_RSV4I)@h + mtspr SPRN_MD_CTR, r0 +#endif tlbia /* Clear all TLB entries */ sync /* wait for tlbia/tlbie to finish */ @@ -797,28 +814,6 @@ initial_mmu: ori r8, r8, MD_APG_INIT@l mtspr SPRN_MD_AP, r8 - /* Map a 512k page for the IMMR to get the processor - * internal registers (among other things). - */ -#ifdef CONFIG_PIN_TLB_IMMR - oris r10, r10, MD_RSV4I@h - ori r10, r10, 0x1c00 - mtspr SPRN_MD_CTR, r10 - - mfspr r9, 638 /* Get current IMMR */ - andis. 
r9, r9, 0xfff8 /* Get 512 kbytes boundary */ - - lis r8, VIRT_IMMR_BASE@h /* Create vaddr for TLB */ - ori r8, r8, MD_EVALID /* Mark it valid */ - mtspr SPRN_MD_EPN, r8 - li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */ - ori r8, r8, MD_SVALID /* Make it valid */ - mtspr SPRN_MD_TWC, r8 - mr r8, r9 /* Create paddr for TLB */ - ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */ - mtspr SPRN_MD_RPN, r8 -#endif - /* Now map the lower RAM (up to 32 Mbytes) into the ITLB. */ #ifdef CONFIG_PIN_TLB_TEXT lis r8, MI_RSV4I@h diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 7097e07a209a..1b6d39e9baed 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -182,6 +182,10 @@ static inline void mmu_mark_initmem_nx(void) { } static inline void mmu_mark_rodata_ro(void) { } #endif +#ifdef CONFIG_PPC_8xx +void __init mmu_mapin_immr(void); +#endif + #ifdef CONFIG_PPC_DEBUG_WX void ptdump_check_wx(void); #else diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index b27017109a36..44cbde7612cb 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -66,7 +66,7 @@ void __init MMU_init_hw(void) if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) { unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000; unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY; - int i = IS_ENABLED(CONFIG_PIN_TLB_IMMR) ? 29 : 28; + int i = 28; unsigned long addr = 0; unsigned long mem = total_lowmem; @@ -81,12 +81,19 @@ void __init MMU_init_hw(void) } } -static void __init mmu_mapin_immr(void) +static bool immr_is_mapped __initdata; + +void __init mmu_mapin_immr(void) { unsigned long p = PHYS_IMMR_BASE; unsigned long v = VIRT_IMMR_BASE; int offset; + if (immr_is_mapped) + return; + + immr_is_mapped = true; + for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE) map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG); } @@ -122,9 +129,10 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { unsigned long mapped; + mmu_mapin_immr(); + if (__map_without_ltlbs) { mapped = 0; - mmu_mapin_immr(); if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR)) patch_instruction_site(&patch__dtlbmiss_immr_jmp, ppc_inst(PPC_INST_NOP)); if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) @@ -143,7 +151,6 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) */ mmu_mapin_ram_chunk(0, einittext8, PAGE_KERNEL_X); mmu_mapin_ram_chunk(einittext8, mapped, PAGE_KERNEL); - mmu_mapin_immr(); } mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped); diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index 0d036cd868ef..04ea1a8a0bdc 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -187,7 +187,7 @@ config PIN_TLB_DATA config PIN_TLB_IMMR bool "Pinned TLB for IMMR" - depends on PIN_TLB || PPC_EARLY_DEBUG_CPM + depends on PIN_TLB default y help This pins the IMMR area with a 512kbytes page. 
In case diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c index 71660bacb264..7dc1960f8bdb 100644 --- a/arch/powerpc/sysdev/cpm_common.c +++ b/arch/powerpc/sysdev/cpm_common.c @@ -68,6 +68,8 @@ static void udbg_putc_cpm(char c) void __init udbg_init_cpm(void) { #ifdef CONFIG_PPC_8xx + mmu_mapin_immr(); + cpm_udbg_txdesc = (u32 __iomem __force *) (CONFIG_PPC_EARLY_DEBUG_CPM_ADDR - PHYS_IMMR_BASE + VIRT_IMMR_BASE); -- cgit v1.2.3-59-g8ed1b From 684c1664e0de63398aceb748343541b48d398710 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:15 +0000 Subject: powerpc/8xx: Always pin TLBs at startup. At startup, map 32 Mbytes of memory through 4 pages of 8M, and pin them unconditionally. They need to be pinned because KASAN is using page tables early and the TLBs might be dynamically replaced otherwise. Remove the RSV4I flag after installing the mappings unless CONFIG_PIN_TLB_XXXX is selected. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/b27c5767d18053b59f7eefddc189fcc3acf7b9c2.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/head_8xx.S | 31 +++++++++++++++++-------------- arch/powerpc/mm/nohash/8xx.c | 19 +------------------ 2 files changed, 18 insertions(+), 32 deletions(-) diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index d607f4b53e0f..b0cceee6405c 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -765,6 +765,14 @@ start_here: mtspr SPRN_MD_RPN, r0 lis r0, (MD_TWAM | MD_RSV4I)@h mtspr SPRN_MD_CTR, r0 +#endif +#ifndef CONFIG_PIN_TLB_TEXT + li r0, 0 + mtspr SPRN_MI_CTR, r0 +#endif +#if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR) + lis r0, MD_TWAM@h + mtspr SPRN_MD_CTR, r0 #endif tlbia /* Clear all TLB entries */ sync /* wait for tlbia/tlbie to finish */ @@ -802,10 +810,6 @@ initial_mmu: mtspr SPRN_MD_CTR, r10 /* remove PINNED DTLB entries */ tlbia /* Invalidate all TLB entries */ -#ifdef CONFIG_PIN_TLB_DATA - oris r10, r10, MD_RSV4I@h - mtspr SPRN_MD_CTR, r10 /* Set data TLB control */ -#endif lis r8, MI_APG_INIT@h /* Set protection modes */ ori r8, r8, MI_APG_INIT@l @@ -814,33 +818,32 @@ initial_mmu: ori r8, r8, MD_APG_INIT@l mtspr SPRN_MD_AP, r8 - /* Now map the lower RAM (up to 32 Mbytes) into the ITLB. */ -#ifdef CONFIG_PIN_TLB_TEXT + /* Map the lower RAM (up to 32 Mbytes) into the ITLB and DTLB */ lis r8, MI_RSV4I@h ori r8, r8, 0x1c00 -#endif + oris r12, r10, MD_RSV4I@h + ori r12, r12, 0x1c00 li r9, 4 /* up to 4 pages of 8M */ mtctr r9 lis r9, KERNELBASE@h /* Create vaddr for TLB */ li r10, MI_PS8MEG | MI_SVALID /* Set 8M byte page */ li r11, MI_BOOTINIT /* Create RPN for address 0 */ - lis r12, _einittext@h - ori r12, r12, _einittext@l 1: -#ifdef CONFIG_PIN_TLB_TEXT mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */ addi r8, r8, 0x100 -#endif - ori r0, r9, MI_EVALID /* Mark it valid */ mtspr SPRN_MI_EPN, r0 mtspr SPRN_MI_TWC, r10 mtspr SPRN_MI_RPN, r11 /* Store TLB entry */ + mtspr SPRN_MD_CTR, r12 + addi r12, r12, 0x100 + mtspr SPRN_MD_EPN, r0 + mtspr SPRN_MD_TWC, r10 + mtspr SPRN_MD_RPN, r11 addis r9, r9, 0x80 addis r11, r11, 0x80 - cmpl cr0, r9, r12 - bdnzf gt, 1b + bdnz 1b /* Since the cache is enabled according to the information we * just loaded into the TLB, invalidate and enable the caches here. 
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 44cbde7612cb..96e7a58ca5ee 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -62,23 +62,6 @@ unsigned long p_block_mapped(phys_addr_t pa) */ void __init MMU_init_hw(void) { - /* PIN up to the 3 first 8Mb after IMMR in DTLB table */ - if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) { - unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000; - unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY; - int i = 28; - unsigned long addr = 0; - unsigned long mem = total_lowmem; - - for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) { - mtspr(SPRN_MD_CTR, ctr | (i << 8)); - mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID); - mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID); - mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT); - addr += LARGE_PAGE_SIZE_8M; - mem -= LARGE_PAGE_SIZE_8M; - } - } } static bool immr_is_mapped __initdata; @@ -223,7 +206,7 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base, BUG_ON(first_memblock_base != 0); /* 8xx can only access 32MB at the moment */ - memblock_set_current_limit(min_t(u64, first_memblock_size, 0x02000000)); + memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M)); } /* -- cgit v1.2.3-59-g8ed1b From 400dc0f86102d2ad11d3601f1948fbb02e926431 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:16 +0000 Subject: powerpc/8xx: Drop special handling of Linear and IMMR mappings in I/D TLB handlers Up to now, linear and IMMR mappings are managed via huge TLB entries through specific code directly in TLB miss handlers. This implies some patching of the TLB miss handlers at startup, and a lot of dedicated code. Remove all this specific dedicated code. For now we are back to normal handling via standard 4k pages. In the next patches, linear memory mapping and IMMR mapping will be managed through huge pages. 
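For reference, "normal handling via standard 4k pages" boils down to the kind of loop mmu_mapin_immr() uses earlier in this series — a user-space sketch with made-up addresses:

/* Illustration only: covering a region with 4k mappings, one per page. */
#include <stdio.h>

#define PAGE_SIZE 0x1000UL
#define IMMR_SIZE 0x80000UL	/* assumed 512k, matching the IMMR area */

static void map_page(unsigned long v, unsigned long p)
{
	(void)v; (void)p;	/* stand-in for map_kernel_page(v, p, prot) */
}

int main(void)
{
	unsigned long p = 0xff000000UL, v = 0xfa000000UL;	/* hypothetical */
	unsigned long offset;

	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
		map_page(v + offset, p + offset);
	printf("%lu 4k mappings installed\n", IMMR_SIZE / PAGE_SIZE);
	return 0;
}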
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/221b7e3ead80a5969629938c023f8cfe45fdd2fb.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/head_8xx.S | 29 +----------- arch/powerpc/mm/nohash/8xx.c | 103 +---------------------------------------- 2 files changed, 3 insertions(+), 129 deletions(-) diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index b0cceee6405c..d1546f379757 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -207,31 +207,21 @@ InstructionTLBMiss: mfspr r10, SPRN_SRR0 /* Get effective address of fault */ INVALIDATE_ADJACENT_PAGES_CPU15(r10) mtspr SPRN_MD_EPN, r10 - /* Only modules will cause ITLB Misses as we always - * pin the first 8MB of kernel memory */ #ifdef ITLB_MISS_KERNEL mfcr r11 -#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT) +#if defined(SIMPLE_KERNEL_ADDRESS) cmpi cr0, r10, 0 /* Address >= 0x80000000 */ #else rlwinm r10, r10, 16, 0xfff8 cmpli cr0, r10, PAGE_OFFSET@h -#ifndef CONFIG_PIN_TLB_TEXT - /* It is assumed that kernel code fits into the first 32M */ -0: cmpli cr7, r10, (PAGE_OFFSET + 0x2000000)@h - patch_site 0b, patch__itlbmiss_linmem_top -#endif #endif #endif mfspr r10, SPRN_M_TWB /* Get level 1 table */ #ifdef ITLB_MISS_KERNEL -#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT) +#if defined(SIMPLE_KERNEL_ADDRESS) bge+ 3f #else blt+ 3f -#endif -#ifndef CONFIG_PIN_TLB_TEXT - blt cr7, ITLBMissLinear #endif rlwinm r10, r10, 0, 20, 31 oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha @@ -327,19 +317,9 @@ DataStoreTLBMiss: mfspr r10, SPRN_MD_EPN rlwinm r10, r10, 16, 0xfff8 cmpli cr0, r10, PAGE_OFFSET@h -#ifndef CONFIG_PIN_TLB_IMMR - cmpli cr6, r10, VIRT_IMMR_BASE@h -#endif -0: cmpli cr7, r10, (PAGE_OFFSET + 0x2000000)@h - patch_site 0b, patch__dtlbmiss_linmem_top mfspr r10, SPRN_M_TWB /* Get level 1 table */ blt+ 3f -#ifndef CONFIG_PIN_TLB_IMMR -0: beq- cr6, DTLBMissIMMR - patch_site 0b, patch__dtlbmiss_immr_jmp -#endif - blt cr7, DTLBMissLinear rlwinm r10, r10, 0, 20, 31 oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha 3: @@ -571,14 +551,9 @@ FixupDAR:/* Entry point for dcbx workaround. */ cmpli cr1, r11, PAGE_OFFSET@h mfspr r11, SPRN_M_TWB /* Get level 1 table */ blt+ cr1, 3f - rlwinm r11, r10, 16, 0xfff8 - -0: cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h - patch_site 0b, patch__fixupdar_linmem_top /* create physical page address from effective address */ tophys(r11, r10) - blt- cr7, 201f mfspr r11, SPRN_M_TWB /* Get level 1 table */ rlwinm r11, r11, 0, 20, 31 oris r11, r11, (swapper_pg_dir - PAGE_OFFSET)@ha diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 96e7a58ca5ee..b735482e1529 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -55,8 +55,6 @@ unsigned long p_block_mapped(phys_addr_t pa) return 0; } -#define LARGE_PAGE_SIZE_8M (1<<23) - /* * MMU_init_hw does the chip-specific initialization of the MMU hardware. 
*/ @@ -81,119 +79,20 @@ void __init mmu_mapin_immr(void) map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG); } -static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped) -{ - modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16); -} - -static void mmu_patch_addis(s32 *site, long simm) -{ - unsigned int instr = *(unsigned int *)patch_site_addr(site); - - instr &= 0xffff0000; - instr |= ((unsigned long)simm) >> 16; - patch_instruction_site(site, ppc_inst(instr)); -} - -static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot) -{ - unsigned long s = offset; - unsigned long v = PAGE_OFFSET + s; - phys_addr_t p = memstart_addr + s; - - for (; s < top; s += PAGE_SIZE) { - map_kernel_page(v, p, prot); - v += PAGE_SIZE; - p += PAGE_SIZE; - } -} - unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { - unsigned long mapped; - mmu_mapin_immr(); - if (__map_without_ltlbs) { - mapped = 0; - if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR)) - patch_instruction_site(&patch__dtlbmiss_immr_jmp, ppc_inst(PPC_INST_NOP)); - if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) - mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0); - } else { - unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M); - - mapped = top & ~(LARGE_PAGE_SIZE_8M - 1); - if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) - mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, einittext8); - - /* - * Populate page tables to: - * - have them appear in /sys/kernel/debug/kernel_page_tables - * - allow the BDI to find the pages when they are not PINNED - */ - mmu_mapin_ram_chunk(0, einittext8, PAGE_KERNEL_X); - mmu_mapin_ram_chunk(einittext8, mapped, PAGE_KERNEL); - } - - mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped); - mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped); - - /* If the size of RAM is not an exact power of two, we may not - * have covered RAM in its entirety with 8 MiB - * pages. Consequently, restrict the top end of RAM currently - * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail" - * coverage with normal-sized pages (or other reasons) do not - * attempt to allocate outside the allowed range. 
- */ - if (mapped) - memblock_set_current_limit(mapped); - - block_mapped_ram = mapped; - - return mapped; + return 0; } void mmu_mark_initmem_nx(void) { - if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23) - mmu_patch_addis(&patch__itlbmiss_linmem_top8, - -((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1))); - if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) { - unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M); - unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M); - unsigned long etext = __pa(_etext); - - mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext)); - - /* Update page tables for PTDUMP and BDI */ - mmu_mapin_ram_chunk(0, einittext8, __pgprot(0)); - if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) { - mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_TEXT); - mmu_mapin_ram_chunk(etext, einittext8, PAGE_KERNEL); - } else { - mmu_mapin_ram_chunk(0, etext8, PAGE_KERNEL_TEXT); - mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL); - } - } } #ifdef CONFIG_STRICT_KERNEL_RWX void mmu_mark_rodata_ro(void) { - unsigned long sinittext = __pa(_sinittext); - unsigned long etext = __pa(_etext); - - if (CONFIG_DATA_SHIFT < 23) - mmu_patch_addis(&patch__dtlbmiss_romem_top8, - -__pa(((unsigned long)_sinittext) & - ~(LARGE_PAGE_SIZE_8M - 1))); - mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext)); - - /* Update page tables for PTDUMP and BDI */ - mmu_mapin_ram_chunk(0, sinittext, __pgprot(0)); - mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_ROX); - mmu_mapin_ram_chunk(etext, sinittext, PAGE_KERNEL_RO); } #endif -- cgit v1.2.3-59-g8ed1b From 1251288e64ba44969e1c4d59e5ee88a6e873447b Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:17 +0000 Subject: powerpc/8xx: Remove now unused TLB miss functions The code to set up the linear and IMMR mappings via huge TLB entries is not called anymore. Remove it. Also remove the handling of the removed code's exit points in the perf driver. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/75750d25849cb8e73ca519866bb892d7eb9649c0.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 8 +-- arch/powerpc/kernel/head_8xx.S | 83 ---------------------------- arch/powerpc/perf/8xx-pmu.c | 10 ---- 3 files changed, 1 insertion(+), 100 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index 4d3ef3841b00..e82368838416 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -240,13 +240,7 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) } /* patch sites */ -extern s32 patch__itlbmiss_linmem_top, patch__itlbmiss_linmem_top8; -extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp; -extern s32 patch__fixupdar_linmem_top; -extern s32 patch__dtlbmiss_romem_top, patch__dtlbmiss_romem_top8; - -extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2; -extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3; +extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1; extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf; #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index d1546f379757..fb5d17187772 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -278,33 +278,6 @@ InstructionTLBMiss: rfi #endif -#ifndef CONFIG_PIN_TLB_TEXT -ITLBMissLinear: - mtcr r11 -#if defined(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23 - patch_site 0f, patch__itlbmiss_linmem_top8 - - mfspr r10, SPRN_SRR0 -0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha - rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K - ori r11, r11, MI_PS512K | MI_SVALID - rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */ -#else - /* Set 8M byte page and mark it valid */ - li r11, MI_PS8MEG | MI_SVALID - rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */ -#endif - mtspr SPRN_MI_TWC, r11 - ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ - _PAGE_PRESENT - mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ - -0: mfspr r10, SPRN_SPRG_SCRATCH0 - mfspr r11, SPRN_SPRG_SCRATCH1 - rfi - patch_site 0b, patch__itlbmiss_exit_2 -#endif - . 
= 0x1200 DataStoreTLBMiss: mtspr SPRN_DAR, r10 @@ -371,62 +344,6 @@ DataStoreTLBMiss: rfi patch_site 0b, patch__dtlbmiss_exit_1 -DTLBMissIMMR: - mtcr r11 - /* Set 512k byte guarded page and mark it valid */ - li r10, MD_PS512K | MD_GUARDED | MD_SVALID - mtspr SPRN_MD_TWC, r10 - mfspr r10, SPRN_IMMR /* Get current IMMR */ - rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */ - ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ - _PAGE_PRESENT | _PAGE_NO_CACHE - mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ - - li r11, RPN_PATTERN - -0: mfspr r10, SPRN_DAR - mtspr SPRN_DAR, r11 /* Tag DAR */ - mfspr r11, SPRN_M_TW - rfi - patch_site 0b, patch__dtlbmiss_exit_2 - -DTLBMissLinear: - mtcr r11 - rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */ -#if defined(CONFIG_STRICT_KERNEL_RWX) && CONFIG_DATA_SHIFT < 23 - patch_site 0f, patch__dtlbmiss_romem_top8 - -0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha - rlwinm r11, r11, 0, 0xff800000 - neg r10, r11 - or r11, r11, r10 - rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K - ori r11, r11, MI_PS512K | MI_SVALID - mfspr r10, SPRN_MD_EPN - rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */ -#else - /* Set 8M byte page and mark it valid */ - li r11, MD_PS8MEG | MD_SVALID -#endif - mtspr SPRN_MD_TWC, r11 -#ifdef CONFIG_STRICT_KERNEL_RWX - patch_site 0f, patch__dtlbmiss_romem_top - -0: subis r11, r10, 0 - rlwimi r10, r11, 11, _PAGE_RO -#endif - ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ - _PAGE_PRESENT - mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ - - li r11, RPN_PATTERN - -0: mfspr r10, SPRN_DAR - mtspr SPRN_DAR, r11 /* Tag DAR */ - mfspr r11, SPRN_M_TW - rfi - patch_site 0b, patch__dtlbmiss_exit_3 - /* This is an instruction TLB error on the MPC8xx. This could be due * to many reasons, such as executing guarded memory or illegal instruction * addresses. There is nothing to do but handle a big time error fault. 
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c index acc27fc63eb7..e53c3c161257 100644 --- a/arch/powerpc/perf/8xx-pmu.c +++ b/arch/powerpc/perf/8xx-pmu.c @@ -100,9 +100,6 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags) unsigned long target = patch_site_addr(&patch__itlbmiss_perf); patch_branch_site(&patch__itlbmiss_exit_1, target, 0); -#ifndef CONFIG_PIN_TLB_TEXT - patch_branch_site(&patch__itlbmiss_exit_2, target, 0); -#endif } val = itlb_miss_counter; break; @@ -111,8 +108,6 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags) unsigned long target = patch_site_addr(&patch__dtlbmiss_perf); patch_branch_site(&patch__dtlbmiss_exit_1, target, 0); - patch_branch_site(&patch__dtlbmiss_exit_2, target, 0); - patch_branch_site(&patch__dtlbmiss_exit_3, target, 0); } val = dtlb_miss_counter; break; @@ -175,9 +170,6 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags) __PPC_SPR(SPRN_SPRG_SCRATCH0)); patch_instruction_site(&patch__itlbmiss_exit_1, insn); -#ifndef CONFIG_PIN_TLB_TEXT - patch_instruction_site(&patch__itlbmiss_exit_2, insn); -#endif } break; case PERF_8xx_ID_DTLB_LOAD_MISS: @@ -187,8 +179,6 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags) __PPC_SPR(SPRN_DAR)); patch_instruction_site(&patch__dtlbmiss_exit_1, insn); - patch_instruction_site(&patch__dtlbmiss_exit_2, insn); - patch_instruction_site(&patch__dtlbmiss_exit_3, insn); } break; } -- cgit v1.2.3-59-g8ed1b From 0c8c2c9c201b44eed6c10d7c5c8d25fe5aab87ce Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:18 +0000 Subject: powerpc/8xx: Move DTLB perf handling closer. Now that space has been freed next to the DTLB miss handler, its associated DTLB perf handling can be brought back to the same place. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/97f48cc1a2ea6b895bfac0752cbe59deaf2eecda.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/head_8xx.S | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index fb5d17187772..9f3f7f3d03a7 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -344,6 +344,17 @@ DataStoreTLBMiss: rfi patch_site 0b, patch__dtlbmiss_exit_1 +#ifdef CONFIG_PERF_EVENTS + patch_site 0f, patch__dtlbmiss_perf +0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0) + addi r10, r10, 1 + stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0) + mfspr r10, SPRN_DAR + mtspr SPRN_DAR, r11 /* Tag DAR */ + mfspr r11, SPRN_M_TW + rfi +#endif + /* This is an instruction TLB error on the MPC8xx. This could be due * to many reasons, such as executing guarded memory or illegal instruction * addresses. There is nothing to do but handle a big time error fault. 
@@ -390,18 +401,6 @@ DARFixed:/* Return from dcbx instruction bug workaround */ /* 0x300 is DataAccess exception, needed by bad_page_fault() */ EXC_XFER_LITE(0x300, handle_page_fault) -/* Called from DataStoreTLBMiss when perf TLB misses events are activated */ -#ifdef CONFIG_PERF_EVENTS - patch_site 0f, patch__dtlbmiss_perf -0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0) - addi r10, r10, 1 - stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0) - mfspr r10, SPRN_DAR - mtspr SPRN_DAR, r11 /* Tag DAR */ - mfspr r11, SPRN_M_TW - rfi -#endif - stack_overflow: vmap_stack_overflow_exception -- cgit v1.2.3-59-g8ed1b From a0591b60eef965f7f5255ad4696bbba9af4b43d0 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:19 +0000 Subject: powerpc/mm: Don't be too strict with _etext alignment on PPC32 Similar to PPC64, accept mapping RO data as ROX as a trade-off between security and memory usage. Having RO data executable is not a high risk as RO data can't be modified to forge an exploit. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/8c4a0d89d944eed984dd941e509614031a5ace2b.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 26 -------------------------- arch/powerpc/kernel/vmlinux.lds.S | 3 +-- 2 files changed, 1 insertion(+), 28 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1d4ef4f27dec..d147d379b1b9 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -778,32 +778,6 @@ config THREAD_SHIFT Used to define the stack size. The default is almost always what you want. Only change this if you know what you are doing. -config ETEXT_SHIFT_BOOL - bool "Set custom etext alignment" if STRICT_KERNEL_RWX && \ - (PPC_BOOK3S_32 || PPC_8xx) - depends on ADVANCED_OPTIONS - help - This option allows you to set the kernel end of text alignment. When - RAM is mapped by blocks, the alignment needs to fit the size and - number of possible blocks. The default should be OK for most configs. - - Say N here unless you know what you are doing. - -config ETEXT_SHIFT - int "_etext shift" if ETEXT_SHIFT_BOOL - range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 - range 19 23 if STRICT_KERNEL_RWX && PPC_8xx - default 17 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 - default 19 if STRICT_KERNEL_RWX && PPC_8xx - default PPC_PAGE_SHIFT - help - On Book3S 32 (603+), IBATs are used to map kernel text. - Smaller is the alignment, greater is the number of necessary IBATs. - - On 8xx, large pages (512kb or 8M) are used to map kernel linear - memory. Aligning to 8M reduces TLB misses as only 8M pages are used - in that case. - config DATA_SHIFT_BOOL bool "Set custom data alignment" if STRICT_KERNEL_RWX && \ (PPC_BOOK3S_32 || PPC_8xx) diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 31a0f201fb6f..54f23205c2b9 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -15,7 +15,6 @@ #include #define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT) -#define ETEXT_ALIGN_SIZE (1 << CONFIG_ETEXT_SHIFT) ENTRY(_stext) @@ -116,7 +115,7 @@ SECTIONS } :text - . = ALIGN(ETEXT_ALIGN_SIZE); + . 
= ALIGN(PAGE_SIZE); _etext = .; PROVIDE32 (etext = .); -- cgit v1.2.3-59-g8ed1b From c8bef10a9f17b2b9549e37878b2bcd48039c136b Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:20 +0000 Subject: powerpc/8xx: Refactor kernel address boundary comparison Now that linear and IMMR dedicated TLB handling is gone, kernel boundary address comparison is similar in the ITLB miss handler and in the DTLB miss handler. Create a macro named compare_to_kernel_boundary. When TASK_SIZE is strictly below 0x80000000 and PAGE_OFFSET is above 0x80000000, it is enough to compare to 0x80000000, and this can be done with a single instruction. Using the not. instruction, we get to use the 'blt' conditional branch as when doing a regular comparison: 0x00000000 <= addr <= 0x7fffffff ==> 0xffffffff >= NOT(addr) >= 0x80000000 The above test corresponds to a 'blt'. Otherwise, do a regular comparison using two instructions. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/6312575d06a8813105e6564a3b12e1d373aa1b2f.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/head_8xx.S | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 9f3f7f3d03a7..9a117b9f0998 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -32,10 +32,15 @@ #include "head_32.h" +.macro compare_to_kernel_boundary scratch, addr #if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000 /* By simply checking Address >= 0x80000000, we know if its a kernel address */ -#define SIMPLE_KERNEL_ADDRESS 1 + not. \scratch, \addr +#else + rlwinm \scratch, \addr, 16, 0xfff8 + cmpli cr0, \scratch, PAGE_OFFSET@h #endif +.endm /* * We need an ITLB miss handler for kernel addresses if: @@ -209,20 +214,11 @@ InstructionTLBMiss: mtspr SPRN_MD_EPN, r10 #ifdef ITLB_MISS_KERNEL mfcr r11 -#if defined(SIMPLE_KERNEL_ADDRESS) - cmpi cr0, r10, 0 /* Address >= 0x80000000 */ -#else - rlwinm r10, r10, 16, 0xfff8 - cmpli cr0, r10, PAGE_OFFSET@h -#endif + compare_to_kernel_boundary r10, r10 #endif mfspr r10, SPRN_M_TWB /* Get level 1 table */ #ifdef ITLB_MISS_KERNEL -#if defined(SIMPLE_KERNEL_ADDRESS) - bge+ 3f -#else blt+ 3f -#endif rlwinm r10, r10, 0, 20, 31 oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha 3: @@ -288,9 +284,7 @@ DataStoreTLBMiss: * kernel page tables. */ mfspr r10, SPRN_MD_EPN - rlwinm r10, r10, 16, 0xfff8 - cmpli cr0, r10, PAGE_OFFSET@h - + compare_to_kernel_boundary r10, r10 mfspr r10, SPRN_M_TWB /* Get level 1 table */ blt+ 3f rlwinm r10, r10, 0, 20, 31 -- cgit v1.2.3-59-g8ed1b From 34536d78068318def0a370462cbc3319e1ca9014 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:22 +0000 Subject: powerpc/8xx: Add a function to early map kernel via huge pages Add a function to early map kernel memory using huge pages. For 512k pages, just use a standard page table and map them in using 512k pages. For 8M pages, create a hugepd table and populate the two PGD entries with it. This function can only be used to create page tables at startup. Once the regular SLAB allocation functions replace memblock functions, this function cannot allocate new pages anymore. However it can still update existing mappings with new protections. The hugepd_none() macro is moved into asm/hugetlb.h to be usable outside of mm/hugetlbpage.c. early_pte_alloc_kernel() is made visible. The _PAGE_HUGE flag is now displayed by ptdump. 
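A user-space model of the helper's dispatch (illustration only; only the 512k-vs-8M split is taken from the patch, names and values are made up):

/* Illustration only: __early_map_kernel_hugepage() accepts 512k or 8M.
 * 512k entries land in a standard PTE page, 8M goes through a hugepd. */
#include <stdio.h>

enum { MMU_PAGE_512K, MMU_PAGE_8M };

static int early_map(unsigned long va, int psize)
{
	if (psize != MMU_PAGE_512K && psize != MMU_PAGE_8M)
		return -1;		/* mirrors the WARN_ON() in the patch */
	if (psize == MMU_PAGE_512K)
		printf("0x%08lx: PTE in a standard page table\n", va);
	else
		printf("0x%08lx: hugepd shared by two PGD slots\n", va);
	return 0;
}

int main(void)
{
	early_map(0xfa000000UL, MMU_PAGE_512K);	/* e.g. IMMR, next patch */
	early_map(0xc0000000UL, MMU_PAGE_8M);	/* e.g. linear mapping */
	return 0;
}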
Signed-off-by: Christophe Leroy [mpe: Change ptdump display to use "huge"] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/68325bcd3b6f93127f7810418a2352c3519066d6.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h | 5 +++ arch/powerpc/include/asm/pgtable.h | 2 + arch/powerpc/mm/nohash/8xx.c | 52 ++++++++++++++++++++++++ arch/powerpc/mm/pgtable_32.c | 2 +- arch/powerpc/mm/ptdump/8xx.c | 5 +++ arch/powerpc/platforms/Kconfig.cputype | 1 + 6 files changed, 66 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h index 1c7d4693a78e..e752a5807a59 100644 --- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h @@ -35,6 +35,11 @@ static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshi *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M); } +static inline void hugepd_populate_kernel(hugepd_t *hpdp, pte_t *new, unsigned int pshift) +{ + *hpdp = __hugepd(__pa(new) | _PMD_PRESENT | _PMD_PAGE_8M); +} + static inline int check_and_get_huge_psize(int shift) { return shift_to_mmu_psize(shift); diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index b1f1d5339735..961895be932a 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -107,6 +107,8 @@ unsigned long vmalloc_to_phys(void *vmalloc_addr); void pgtable_cache_add(unsigned int shift); +pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va); + #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32) void mark_initmem_nx(void); #else diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index b735482e1529..72fb75f2a5f1 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -9,9 +9,11 @@ #include #include +#include #include #include #include +#include #include @@ -55,6 +57,56 @@ unsigned long p_block_mapped(phys_addr_t pa) return 0; } +static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va) +{ + if (hpd_val(*pmdp) == 0) { + pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K); + + if (!ptep) + return NULL; + + hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M); + hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M); + } + return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT); +} + +static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa, + pgprot_t prot, int psize, bool new) +{ + pmd_t *pmdp = pmd_ptr_k(va); + pte_t *ptep; + + if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M)) + return -EINVAL; + + if (new) { + if (WARN_ON(slab_is_available())) + return -EINVAL; + + if (psize == MMU_PAGE_512K) + ptep = early_pte_alloc_kernel(pmdp, va); + else + ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va); + } else { + if (psize == MMU_PAGE_512K) + ptep = pte_offset_kernel(pmdp, va); + else + ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT); + } + + if (WARN_ON(!ptep)) + return -ENOMEM; + + /* The PTE should never be already present */ + if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot))) + return -EINVAL; + + set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot))); + + return 0; +} + /* * MMU_init_hw does the chip-specific initialization of the MMU hardware. 
*/ diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index bd0cb6e3573e..05902bbff8d6 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -61,7 +61,7 @@ static void __init *early_alloc_pgtable(unsigned long size) return ptr; } -static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va) +pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va) { if (pmd_none(*pmdp)) { pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE); diff --git a/arch/powerpc/mm/ptdump/8xx.c b/arch/powerpc/mm/ptdump/8xx.c index 9e2d8e847d6e..4bc350736c1d 100644 --- a/arch/powerpc/mm/ptdump/8xx.c +++ b/arch/powerpc/mm/ptdump/8xx.c @@ -11,6 +11,11 @@ static const struct flag_info flag_array[] = { { + .mask = _PAGE_HUGE, + .val = _PAGE_HUGE, + .set = "huge", + .clear = " ", + }, { .mask = _PAGE_SH, .val = 0, .set = "user", diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index b0587b833517..404f26917da7 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -56,6 +56,7 @@ config PPC_8xx select PPC_HAVE_KUEP select PPC_HAVE_KUAP select HAVE_ARCH_VMAP_STACK + select HUGETLBFS config 40x bool "AMCC 40x" -- cgit v1.2.3-59-g8ed1b From a623bb5861dc442dc8de9edc9b3116f8b7c235c4 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:23 +0000 Subject: powerpc/8xx: Map IMMR with a huge page Map the IMMR area with a single 512k huge page. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/9495dba06669da40e133f24607758fa6dcc65f66.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/nohash/8xx.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 72fb75f2a5f1..f8fff1fa72e3 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -118,17 +118,13 @@ static bool immr_is_mapped __initdata; void __init mmu_mapin_immr(void) { - unsigned long p = PHYS_IMMR_BASE; - unsigned long v = VIRT_IMMR_BASE; - int offset; - if (immr_is_mapped) return; immr_is_mapped = true; - for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE) - map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG); + __early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE, + PAGE_KERNEL_NCG, MMU_PAGE_512K, true); } unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) -- cgit v1.2.3-59-g8ed1b From cf209951fa7f2e7a8ec92f45f27ea11bc024bbfc Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:24 +0000 Subject: powerpc/8xx: Map linear memory with huge pages Map linear memory space with 512k and 8M pages whenever possible. Three mappings are performed: - One for kernel text - One for RO data - One for the rest Separating the mappings is done to be able to update the protection later when using STRICT_KERNEL_RWX. The ITLB miss handler now need to also handle huge TLBs unless kernel text in pinned. 
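The mapping is done in three passes because only the 8M-aligned middle of a chunk can take 8M pages; the 512k-aligned head and tail are filled with 512k pages. A standalone sketch of the boundary arithmetic used by mmu_mapin_ram_chunk() in the diff below (the region limits are made up for illustration):

  #include <stdio.h>

  #define SZ_512K 0x80000UL
  #define SZ_8M   0x800000UL
  #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
  #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

  int main(void)
  {
          unsigned long p    = 5 * SZ_512K;         /* hypothetical 512k-aligned start */
          unsigned long top  = 3 * SZ_8M + SZ_512K; /* hypothetical 512k-aligned end */
          unsigned long head = ALIGN_UP(p, SZ_8M);
          unsigned long mid  = ALIGN_DOWN(top, SZ_8M);

          printf("512k pages: [%#lx, %#lx)\n", p, head);
          printf("  8M pages: [%#lx, %#lx)\n", head, mid);
          printf("512k pages: [%#lx, %#lx)\n", mid, top);
          return 0;
  }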
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/c44f0ab5510474f25123d904cd1f4e5c6aa3c1ac.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/head_8xx.S | 4 ++-- arch/powerpc/mm/nohash/8xx.c | 50 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 9a117b9f0998..abb71fad7d6a 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -224,7 +224,7 @@ InstructionTLBMiss: 3: mtcr r11 #endif -#ifdef CONFIG_HUGETLBFS +#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT) lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ mtspr SPRN_MD_TWC, r11 #else @@ -234,7 +234,7 @@ InstructionTLBMiss: #endif mfspr r10, SPRN_MD_TWC lwz r10, 0(r10) /* Get the pte */ -#ifdef CONFIG_HUGETLBFS +#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT) rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K mtspr SPRN_MI_TWC, r11 #endif diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index f8fff1fa72e3..ec3ef75895d8 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -127,20 +127,68 @@ void __init mmu_mapin_immr(void) PAGE_KERNEL_NCG, MMU_PAGE_512K, true); } +static void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, + pgprot_t prot, bool new) +{ + unsigned long v = PAGE_OFFSET + offset; + unsigned long p = offset; + + WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K)); + + for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K) + __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); + for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M) + __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new); + for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K) + __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); + + if (!new) + flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top); +} + unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { + unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M); + unsigned long sinittext = __pa(_sinittext); + unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8; + unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M); + + WARN_ON(top < einittext8); + mmu_mapin_immr(); - return 0; + if (__map_without_ltlbs) + return 0; + + mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true); + mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true); + mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true); + + if (top > SZ_32M) + memblock_set_current_limit(top); + + block_mapped_ram = top; + + return top; } void mmu_mark_initmem_nx(void) { + unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M); + unsigned long sinittext = __pa(_sinittext); + unsigned long boundary = strict_kernel_rwx_enabled() ? 
sinittext : etext8; + unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M); + + mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false); + mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false); } #ifdef CONFIG_STRICT_KERNEL_RWX void mmu_mark_rodata_ro(void) { + unsigned long sinittext = __pa(_sinittext); + + mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false); } #endif -- cgit v1.2.3-59-g8ed1b From da1adea07576722da4597b0df7d00931f0203229 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:25 +0000 Subject: powerpc/8xx: Allow STRICT_KERNEL_RwX with pinned TLB Pinned TLB are 8M. Now that there is no strict boundary anymore between text and RO data, it is possible to use 8M pinned executable TLB that covers both text and RO data. When PIN_TLB_DATA or PIN_TLB_TEXT is selected, enforce 8M RW data alignment and allow STRICT_KERNEL_RWX. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/c535fc97bf0dd8693192e25feeed8088701e00c6.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 8 +++++--- arch/powerpc/mm/nohash/8xx.c | 9 +++++++-- arch/powerpc/platforms/8xx/Kconfig | 2 +- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index d147d379b1b9..f5e82629e2cd 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -779,9 +779,10 @@ config THREAD_SHIFT want. Only change this if you know what you are doing. config DATA_SHIFT_BOOL - bool "Set custom data alignment" if STRICT_KERNEL_RWX && \ - (PPC_BOOK3S_32 || PPC_8xx) + bool "Set custom data alignment" depends on ADVANCED_OPTIONS + depends on STRICT_KERNEL_RWX + depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !PIN_TLB_TEXT) help This option allows you to set the kernel data alignment. When RAM is mapped by blocks, the alignment needs to fit the size and @@ -803,7 +804,8 @@ config DATA_SHIFT On 8xx, large pages (512kb or 8M) are used to map kernel linear memory. Aligning to 8M reduces TLB misses as only 8M pages are used - in that case. + in that case. If PIN_TLB is selected, it must be aligned to 8M as + 8M pages will be pinned. 
config FORCE_MAX_ZONEORDER int "Maximum zone order" diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index ec3ef75895d8..d8697f535c3e 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -127,8 +127,8 @@ void __init mmu_mapin_immr(void) PAGE_KERNEL_NCG, MMU_PAGE_512K, true); } -static void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, - pgprot_t prot, bool new) +static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, + pgprot_t prot, bool new) { unsigned long v = PAGE_OFFSET + offset; unsigned long p = offset; @@ -181,6 +181,9 @@ void mmu_mark_initmem_nx(void) mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false); mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false); + + if (IS_ENABLED(CONFIG_PIN_TLB_TEXT)) + mmu_pin_tlb(block_mapped_ram, false); } #ifdef CONFIG_STRICT_KERNEL_RWX @@ -189,6 +192,8 @@ void mmu_mark_rodata_ro(void) unsigned long sinittext = __pa(_sinittext); mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false); + if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) + mmu_pin_tlb(block_mapped_ram, true); } #endif diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index 04ea1a8a0bdc..05669f2fadce 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -167,7 +167,7 @@ menu "8xx advanced setup" config PIN_TLB bool "Pinned Kernel TLBs" - depends on ADVANCED_OPTIONS && !DEBUG_PAGEALLOC && !STRICT_KERNEL_RWX + depends on ADVANCED_OPTIONS && !DEBUG_PAGEALLOC help On the 8xx, we have 32 instruction TLBs and 32 data TLBs. In each table 4 TLBs can be pinned. -- cgit v1.2.3-59-g8ed1b From fcdafd10a363cf3278ce29c6c9a92930380c6cd8 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:26 +0000 Subject: powerpc/8xx: Allow large TLBs with DEBUG_PAGEALLOC DEBUG_PAGEALLOC only manages RW data. Text and RO data can still be mapped with hugepages and pinned TLB. In order to map with hugepages, also enforce a 512kB data alignment minimum. That's a trade-off between size of speed, taking into account that DEBUG_PAGEALLOC is a debug option. Anyway the alignment is still tunable. We also allow tuning of alignment for book3s to limit the complexity of the test in Kconfig that will anyway disappear in the following patches once DEBUG_PAGEALLOC is handled together with BATs. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/c13256f2d356a316715da61fe089b3623ef217a5.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 11 +++++++---- arch/powerpc/mm/init_32.c | 5 ++++- arch/powerpc/mm/nohash/8xx.c | 11 ++++++++--- arch/powerpc/platforms/8xx/Kconfig | 2 +- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index f5e82629e2cd..fcb0a9ae9872 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -781,8 +781,9 @@ config THREAD_SHIFT config DATA_SHIFT_BOOL bool "Set custom data alignment" depends on ADVANCED_OPTIONS - depends on STRICT_KERNEL_RWX - depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !PIN_TLB_TEXT) + depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC + depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && \ + (!PIN_TLB_TEXT || !STRICT_KERNEL_RWX)) help This option allows you to set the kernel data alignment. 
When RAM is mapped by blocks, the alignment needs to fit the size and @@ -793,10 +794,12 @@ config DATA_SHIFT_BOOL config DATA_SHIFT int "Data shift" if DATA_SHIFT_BOOL default 24 if STRICT_KERNEL_RWX && PPC64 - range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 - range 19 23 if STRICT_KERNEL_RWX && PPC_8xx + range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_BOOK3S_32 + range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_8xx default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 default 23 if STRICT_KERNEL_RWX && PPC_8xx + default 23 if DEBUG_PAGEALLOC && PPC_8xx && PIN_TLB_DATA + default 19 if DEBUG_PAGEALLOC && PPC_8xx default PPC_PAGE_SHIFT help On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO. diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index a6991ef8727d..8977a7c2543d 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -96,11 +96,14 @@ static void __init MMU_setup(void) if (strstr(boot_command_line, "noltlbs")) { __map_without_ltlbs = 1; } + if (IS_ENABLED(CONFIG_PPC_8xx)) + return; + if (debug_pagealloc_enabled()) { __map_without_bats = 1; __map_without_ltlbs = 1; } - if (strict_kernel_rwx_enabled() && !IS_ENABLED(CONFIG_PPC_8xx)) + if (strict_kernel_rwx_enabled()) __map_without_ltlbs = 1; } diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index d8697f535c3e..286441bbbe49 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -150,7 +150,8 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M); unsigned long sinittext = __pa(_sinittext); - unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8; + bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled(); + unsigned long boundary = strict_boundary ? sinittext : etext8; unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M); WARN_ON(top < einittext8); @@ -161,8 +162,12 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) return 0; mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true); - mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true); - mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true); + if (debug_pagealloc_enabled()) { + top = boundary; + } else { + mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true); + mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true); + } if (top > SZ_32M) memblock_set_current_limit(top); diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index 05669f2fadce..abb2b45b2789 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -167,7 +167,7 @@ menu "8xx advanced setup" config PIN_TLB bool "Pinned Kernel TLBs" - depends on ADVANCED_OPTIONS && !DEBUG_PAGEALLOC + depends on ADVANCED_OPTIONS help On the 8xx, we have 32 instruction TLBs and 32 data TLBs. In each table 4 TLBs can be pinned. -- cgit v1.2.3-59-g8ed1b From a2feeb2c2ecbd9c9206d66f238ca710b760c9ef5 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:27 +0000 Subject: powerpc/8xx: Implement dedicated kasan_init_region() Implement a kasan_init_region() dedicated to 8xx that allocates KASAN regions using huge pages. 
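For scale: the KASAN shadow is one byte per eight bytes of memory, so each 8M huge page of shadow covers 64M of the region being instrumented. A quick standalone check of that arithmetic (the 512M region size is hypothetical):

  #include <stdio.h>

  #define KASAN_SHADOW_SCALE_SHIFT 3      /* 1 shadow byte per 8 bytes */
  #define SZ_8M 0x800000UL

  int main(void)
  {
          unsigned long region = 512UL << 20;      /* hypothetical 512M region */
          unsigned long shadow = region >> KASAN_SHADOW_SCALE_SHIFT;

          printf("%luM region -> %luM shadow -> %lu x 8M huge pages\n",
                 region >> 20, shadow >> 20, shadow / SZ_8M);
          return 0;
  }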
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/d2d60202a8821dc81cffe6ff59cc13c15b7e4bb6.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/kasan/8xx.c | 74 ++++++++++++++++++++++++++++++++++++++++++ arch/powerpc/mm/kasan/Makefile | 1 + 2 files changed, 75 insertions(+) create mode 100644 arch/powerpc/mm/kasan/8xx.c diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c new file mode 100644 index 000000000000..db4ef44af22f --- /dev/null +++ b/arch/powerpc/mm/kasan/8xx.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define DISABLE_BRANCH_PROFILING + +#include +#include +#include +#include + +static int __init +kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block) +{ + pmd_t *pmd = pmd_ptr_k(k_start); + unsigned long k_cur, k_next; + + for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) { + pte_basic_t *new; + + k_next = pgd_addr_end(k_cur, k_end); + k_next = pgd_addr_end(k_next, k_end); + if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte) + continue; + + new = memblock_alloc(sizeof(pte_basic_t), SZ_4K); + if (!new) + return -ENOMEM; + + *new = pte_val(pte_mkhuge(pfn_pte(PHYS_PFN(__pa(block)), PAGE_KERNEL))); + + hugepd_populate_kernel((hugepd_t *)pmd, (pte_t *)new, PAGE_SHIFT_8M); + hugepd_populate_kernel((hugepd_t *)pmd + 1, (pte_t *)new, PAGE_SHIFT_8M); + } + return 0; +} + +int __init kasan_init_region(void *start, size_t size) +{ + unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start); + unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size); + unsigned long k_cur; + int ret; + void *block; + + block = memblock_alloc(k_end - k_start, SZ_8M); + if (!block) + return -ENOMEM; + + if (IS_ALIGNED(k_start, SZ_8M)) { + kasan_init_shadow_8M(k_start, ALIGN_DOWN(k_end, SZ_8M), block); + k_cur = ALIGN_DOWN(k_end, SZ_8M); + if (k_cur == k_end) + goto finish; + } else { + k_cur = k_start; + } + + ret = kasan_init_shadow_page_tables(k_start, k_end); + if (ret) + return ret; + + for (; k_cur < k_end; k_cur += PAGE_SIZE) { + pmd_t *pmd = pmd_ptr_k(k_cur); + void *va = block + k_cur - k_start; + pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL); + + if (k_cur < ALIGN_DOWN(k_end, SZ_512K)) + pte = pte_mkhuge(pte); + + __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0); + } +finish: + flush_tlb_kernel_range(k_start, k_end); + return 0; +} diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile index 6577897673dd..440038ea79f1 100644 --- a/arch/powerpc/mm/kasan/Makefile +++ b/arch/powerpc/mm/kasan/Makefile @@ -3,3 +3,4 @@ KASAN_SANITIZE := n obj-$(CONFIG_PPC32) += kasan_init_32.o +obj-$(CONFIG_PPC_8xx) += 8xx.o -- cgit v1.2.3-59-g8ed1b From 2b279c0348af62f42be346c1ea6d70bac98df0f9 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:28 +0000 Subject: powerpc/32s: Allow mapping with BATs with DEBUG_PAGEALLOC DEBUG_PAGEALLOC only manages RW data. Text and RO data can still be mapped with BATs. In order to map with BATs, also enforce data alignment. Set by default to 256M which is a good compromise for keeping enough BATs for also KASAN and IMMR. 
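Background for the 256M default: BATs can only map power-of-two sized blocks (128k up to 256M), so the bigger and better aligned the linear mapping, the fewer BATs it consumes and the more remain free for KASAN and IMMR. A simplified standalone sketch of a greedy power-of-two split (the kernel's block_size() also accounts for base alignment; the 416M size is hypothetical):

  #include <stdio.h>

  #define SZ_128K 0x20000UL
  #define SZ_256M 0x10000000UL

  int main(void)
  {
          unsigned long size = 0x1a000000;        /* hypothetical 416M to map */
          int nbats = 0;

          while (size >= SZ_128K) {
                  unsigned long blk = SZ_256M;

                  while (blk > size)              /* largest power of two <= size */
                          blk >>= 1;
                  printf("BAT %d maps %#lx bytes\n", nbats++, blk);
                  size -= blk;
          }
          return 0;
  }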
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/fd29c1718ee44d82115d0e835ced808eb4ccbf51.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 1 + arch/powerpc/mm/book3s32/mmu.c | 6 ++++++ arch/powerpc/mm/init_32.c | 5 ++--- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index fcb0a9ae9872..752deddc9ed9 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -797,6 +797,7 @@ config DATA_SHIFT range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_BOOK3S_32 range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_8xx default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 + default 18 if DEBUG_PAGEALLOC && PPC_BOOK3S_32 default 23 if STRICT_KERNEL_RWX && PPC_8xx default 23 if DEBUG_PAGEALLOC && PPC_8xx && PIN_TLB_DATA default 19 if DEBUG_PAGEALLOC && PPC_8xx diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index a9b2cbc74797..a6dcc708eee3 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -170,6 +170,12 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) pr_debug("RAM mapped without BATs\n"); return base; } + if (debug_pagealloc_enabled()) { + if (base >= border) + return base; + if (top >= border) + top = border; + } if (!strict_kernel_rwx_enabled() || base >= border || top <= border) return __mmu_mapin_ram(base, top); diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 8977a7c2543d..36c39bd37256 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -99,10 +99,9 @@ static void __init MMU_setup(void) if (IS_ENABLED(CONFIG_PPC_8xx)) return; - if (debug_pagealloc_enabled()) { - __map_without_bats = 1; + if (debug_pagealloc_enabled()) __map_without_ltlbs = 1; - } + if (strict_kernel_rwx_enabled()) __map_without_ltlbs = 1; } -- cgit v1.2.3-59-g8ed1b From 7974c4732642f710b5111165ae1f7f7fed822282 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:29 +0000 Subject: powerpc/32s: Implement dedicated kasan_init_region() Implement a kasan_init_region() dedicated to book3s/32 that allocates KASAN regions using BATs. 
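One idiom in the code added below is easy to misread: 1 << (ffs(x) - 1) isolates the lowest set bit of x, i.e. the largest power of two that divides the shadow size, and that (not the largest power of two below it) is what sizes the first BAT. A standalone illustration (the 6M shadow size is hypothetical):

  #include <stdio.h>
  #include <strings.h>    /* ffs() */

  int main(void)
  {
          int k_size = 0x600000;                  /* hypothetical 6M shadow */
          int k_size_base = 1 << (ffs(k_size) - 1);               /* 2M */
          int k_size_more = 1 << (ffs(k_size - k_size_base) - 1); /* 4M */

          printf("first BAT %#x, second BAT %#x\n", k_size_base, k_size_more);
          return 0;
  }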
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/709e821602b48a1d7c211a9b156da26db98c3e9d.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/kasan.h | 1 + arch/powerpc/mm/kasan/Makefile | 1 + arch/powerpc/mm/kasan/book3s_32.c | 57 +++++++++++++++++++++++++++++++++++ arch/powerpc/mm/kasan/kasan_init_32.c | 2 +- 4 files changed, 60 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/mm/kasan/book3s_32.c diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index 107a24c3f7b3..be85c7005fb1 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -34,6 +34,7 @@ static inline void kasan_init(void) { } static inline void kasan_late_init(void) { } #endif +void kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte); int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end); int kasan_init_region(void *start, size_t size); diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile index 440038ea79f1..bb1a5408b86b 100644 --- a/arch/powerpc/mm/kasan/Makefile +++ b/arch/powerpc/mm/kasan/Makefile @@ -4,3 +4,4 @@ KASAN_SANITIZE := n obj-$(CONFIG_PPC32) += kasan_init_32.o obj-$(CONFIG_PPC_8xx) += 8xx.o +obj-$(CONFIG_PPC_BOOK3S_32) += book3s_32.o diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c new file mode 100644 index 000000000000..4bc491a4a1fd --- /dev/null +++ b/arch/powerpc/mm/kasan/book3s_32.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define DISABLE_BRANCH_PROFILING + +#include +#include +#include +#include + +int __init kasan_init_region(void *start, size_t size) +{ + unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start); + unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size); + unsigned long k_cur = k_start; + int k_size = k_end - k_start; + int k_size_base = 1 << (ffs(k_size) - 1); + int ret; + void *block; + + block = memblock_alloc(k_size, k_size_base); + + if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) { + int k_size_more = 1 << (ffs(k_size - k_size_base) - 1); + + setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL); + if (k_size_more >= SZ_128K) + setbat(-1, k_start + k_size_base, __pa(block) + k_size_base, + k_size_more, PAGE_KERNEL); + if (v_block_mapped(k_start)) + k_cur = k_start + k_size_base; + if (v_block_mapped(k_start + k_size_base)) + k_cur = k_start + k_size_base + k_size_more; + + update_bats(); + } + + if (!block) + block = memblock_alloc(k_size, PAGE_SIZE); + if (!block) + return -ENOMEM; + + ret = kasan_init_shadow_page_tables(k_start, k_end); + if (ret) + return ret; + + kasan_update_early_region(k_start, k_cur, __pte(0)); + + for (; k_cur < k_end; k_cur += PAGE_SIZE) { + pmd_t *pmd = pmd_ptr_k(k_cur); + void *va = block + k_cur - k_start; + pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL); + + __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0); + } + flush_tlb_kernel_range(k_start, k_end); + return 0; +} diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index 76d418af4ce8..c42085801c04 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -79,7 +79,7 @@ int __init __weak kasan_init_region(void *start, size_t size) return 0; } -static void __init +void __init kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte) { unsigned long 
k_cur; -- cgit v1.2.3-59-g8ed1b From c5ff46d69c410f7fac173e4fde3eea484b4b4eda Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 22 May 2020 23:33:18 +1000 Subject: powerpc: Add ppc_inst_next() In a few places we want to calculate the address of the next instruction. Previously that was simple, we just added 4 bytes, or if using a u32 * we incremented that pointer by 1. But prefixed instructions make it more complicated, we need to advance by either 4 or 8 bytes depending on the actual instruction. We also can't do pointer arithmetic using struct ppc_inst, because it is always 8 bytes in size on 64-bit, even though we might only need to advance by 4 bytes. So add a ppc_inst_next() helper which calculates the location of the next instruction, if the given instruction was located at the given address. Note the instruction doesn't need to actually be at the address in memory. Although it would seem natural for the value to be passed by value, that makes it too easy to write a loop that will read off the end of a page, eg: for (; src < end; src = ppc_inst_next(src, *src), dest = ppc_inst_next(dest, *dest)) As noticed by Christophe and Jordan, if end is the exact end of a page, and the next page is not mapped, this will fault, because *dest will read 8 bytes, 4 bytes into the next page. So value is passed by reference, so the helper can be careful to use ppc_inst_read() on it. Signed-off-by: Michael Ellerman Reviewed-by: Jordan Niethe Link: https://lore.kernel.org/r/20200522133318.1681406-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/inst.h | 13 +++++++++++++ arch/powerpc/kernel/uprobes.c | 2 +- arch/powerpc/lib/feature-fixups.c | 15 ++++++++------- arch/powerpc/xmon/xmon.c | 2 +- 4 files changed, 23 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index d82e0c99cfa1..5b756ba77ed2 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -100,6 +100,19 @@ static inline int ppc_inst_len(struct ppc_inst x) return ppc_inst_prefixed(x) ? 8 : 4; } +/* + * Return the address of the next instruction, if the instruction @value was + * located at @location. + */ +static inline struct ppc_inst *ppc_inst_next(void *location, struct ppc_inst *value) +{ + struct ppc_inst tmp; + + tmp = ppc_inst_read(value); + + return location + ppc_inst_len(tmp); +} + int probe_user_read_inst(struct ppc_inst *inst, struct ppc_inst __user *nip); diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index 83e883e1a42d..d200e7df7167 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -112,7 +112,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) * support doesn't exist and have to fix-up the next instruction * to be executed. 
*/ - regs->nip = utask->vaddr + ppc_inst_len(ppc_inst_read(&auprobe->insn)); + regs->nip = (unsigned long)ppc_inst_next((void *)utask->vaddr, &auprobe->insn); user_disable_single_step(current); return 0; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 80f320c2e189..4c0a7ee9fa00 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -68,7 +68,7 @@ static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest, static int patch_feature_section(unsigned long value, struct fixup_entry *fcur) { - struct ppc_inst *start, *end, *alt_start, *alt_end, *src, *dest; + struct ppc_inst *start, *end, *alt_start, *alt_end, *src, *dest, nop; start = calc_addr(fcur, fcur->start_off); end = calc_addr(fcur, fcur->end_off); @@ -84,14 +84,15 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur) src = alt_start; dest = start; - for (; src < alt_end; src = (void *)src + ppc_inst_len(ppc_inst_read(src)), - (dest = (void *)dest + ppc_inst_len(ppc_inst_read(dest)))) { + for (; src < alt_end; src = ppc_inst_next(src, src), + dest = ppc_inst_next(dest, dest)) { if (patch_alt_instruction(src, dest, alt_start, alt_end)) return 1; } - for (; dest < end; dest = (void *)dest + ppc_inst_len(ppc_inst(PPC_INST_NOP))) - raw_patch_instruction(dest, ppc_inst(PPC_INST_NOP)); + nop = ppc_inst(PPC_INST_NOP); + for (; dest < end; dest = ppc_inst_next(dest, &nop)) + raw_patch_instruction(dest, nop); return 0; } @@ -405,8 +406,8 @@ static void do_final_fixups(void) while (src < end) { inst = ppc_inst_read(src); raw_patch_instruction(dest, inst); - src = (void *)src + ppc_inst_len(inst); - dest = (void *)dest + ppc_inst_len(inst); + src = ppc_inst_next(src, src); + dest = ppc_inst_next(dest, dest); } #endif } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index de585204d1d2..16ee6639a60c 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -939,7 +939,7 @@ static void insert_bpts(void) } patch_instruction(bp->instr, instr); - patch_instruction((void *)bp->instr + ppc_inst_len(instr), + patch_instruction(ppc_inst_next(bp->instr, &instr), ppc_inst(bpinstr)); if (bp->enabled & BP_CIABR) continue; -- cgit v1.2.3-59-g8ed1b From 16ef9767e4dc5cf03a71ae7bc2bc588dbbe7983e Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 26 May 2020 17:26:30 +1000 Subject: powerpc: Add ppc_inst_as_u64() The code patching code wants to get the value of a struct ppc_inst as a u64 when the instruction is prefixed, so we can pass the u64 down to __put_user_asm() and write it with a single store. The optprobes code wants to load a struct ppc_inst as an immediate into a register so it is useful to have it as a u64 to use the existing helper function. Currently this is a bit awkward because the value differs based on the CPU endianness, so add a helper to do the conversion. This fixes the usage in arch_prepare_optimized_kprobe() which was previously incorrect on big endian. 
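The endian dependence comes from writing both halves with a single 64-bit store: the prefix must end up at the lower address, which is the low-order half of the u64 on little endian and the high-order half on big endian. A standalone illustration (the opcode values are made up):

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          uint32_t val = 0x04000000;      /* hypothetical prefix word */
          uint32_t suffix = 0xe4000000;   /* hypothetical suffix word */

          uint64_t le = (uint64_t)suffix << 32 | val;     /* LE kernel */
          uint64_t be = (uint64_t)val << 32 | suffix;     /* BE kernel */

          printf("LE u64 %#llx, BE u64 %#llx\n",
                 (unsigned long long)le, (unsigned long long)be);
          return 0;
  }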
Fixes: 650b55b707fd ("powerpc: Add prefixed instructions to instruction data type") Signed-off-by: Michael Ellerman Tested-by: Jordan Niethe Link: https://lore.kernel.org/r/20200526072630.2487363-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/inst.h | 9 +++++++++ arch/powerpc/kernel/optprobes.c | 3 +-- arch/powerpc/lib/code-patching.c | 8 +------- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index 5b756ba77ed2..45f3ec868258 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -113,6 +113,15 @@ static inline struct ppc_inst *ppc_inst_next(void *location, struct ppc_inst *va return location + ppc_inst_len(tmp); } +static inline u64 ppc_inst_as_u64(struct ppc_inst x) +{ +#ifdef CONFIG_CPU_LITTLE_ENDIAN + return (u64)ppc_inst_suffix(x) << 32 | ppc_inst_val(x); +#else + return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x); +#endif +} + int probe_user_read_inst(struct ppc_inst *inst, struct ppc_inst __user *nip); diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 3ac105e7faae..69bfe96884e2 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -283,8 +283,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) * 3. load instruction to be emulated into relevant register, and */ temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn); - patch_imm64_load_insns(ppc_inst_val(temp) | ((u64)ppc_inst_suffix(temp) << 32), - 4, buff + TMPL_INSN_IDX); + patch_imm64_load_insns(ppc_inst_as_u64(temp), 4, buff + TMPL_INSN_IDX); /* * 4. branch back from trampoline diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 64cf621e5b00..5ecf0d635a8d 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -27,13 +27,7 @@ static int __patch_instruction(struct ppc_inst *exec_addr, struct ppc_inst instr if (!ppc_inst_prefixed(instr)) { __put_user_asm(ppc_inst_val(instr), patch_addr, err, "stw"); } else { -#ifdef CONFIG_CPU_LITTLE_ENDIAN - __put_user_asm((u64)ppc_inst_suffix(instr) << 32 | - ppc_inst_val(instr), patch_addr, err, "std"); -#else - __put_user_asm((u64)ppc_inst_val(instr) << 32 | - ppc_inst_suffix(instr), patch_addr, err, "std"); -#endif + __put_user_asm(ppc_inst_as_u64(instr), patch_addr, err, "std"); } if (err) -- cgit v1.2.3-59-g8ed1b From a101950fcb78b0ba20cd487be6627dea58d55c2b Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Wed, 29 Apr 2020 09:51:20 +0200 Subject: powerpc/xive: Clear the page tables for the ESB IO mapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 1ca3dec2b2df ("powerpc/xive: Prevent page fault issues in the machine crash handler") fixed an issue in the FW assisted dump of machines using hash MMU and the XIVE interrupt mode under the POWER hypervisor. It forced the mapping of the ESB page of interrupts being mapped in the Linux IRQ number space to make sure the 'crash kexec' sequence worked during such an event. But it didn't handle the un-mapping. This mapping is now blocking the removal of a passthrough IO adapter under the POWER hypervisor because it expects the guest OS to have cleared all page table entries related to the adapter. If some are still present, the RTAS call which isolates the PCI slot returns error 9001 "valid outstanding translations". Remove these mapping in the IRQ data cleanup routine. 
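The rule being applied: any ESB mapping whose page table entries were force-populated must have those entries torn down before iounmap(), otherwise the hypervisor still sees live translations for the adapter. A hedged sketch of the pairing, using the unmap_kernel_range() API of this era (the helper name is hypothetical):

  #include <linux/io.h>
  #include <linux/vmalloc.h>

  /* hypothetical helper showing the teardown order this patch establishes */
  static void esb_teardown_sketch(void __iomem *mmio, unsigned int esb_shift)
  {
          /* clear the kernel page table entries first... */
          unmap_kernel_range((unsigned long)mmio, 1u << esb_shift);
          /* ...then release the ioremap'd region itself */
          iounmap(mmio);
  }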
Under KVM, this cleanup is not required because the ESB pages for the adapter interrupts are un-mapped from the guest by the hypervisor in the KVM XIVE native device. This is now redundant but it's harmless. Fixes: 1ca3dec2b2df ("powerpc/xive: Prevent page fault issues in the machine crash handler") Cc: stable@vger.kernel.org # v5.5+ Signed-off-by: Cédric Le Goater Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200429075122.1216388-2-clg@kaod.org --- arch/powerpc/sysdev/xive/common.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 9603b2830d03..3dbc94cb4380 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -1020,12 +1021,16 @@ EXPORT_SYMBOL_GPL(is_xive_irq); void xive_cleanup_irq_data(struct xive_irq_data *xd) { if (xd->eoi_mmio) { + unmap_kernel_range((unsigned long)xd->eoi_mmio, + 1u << xd->esb_shift); iounmap(xd->eoi_mmio); if (xd->eoi_mmio == xd->trig_mmio) xd->trig_mmio = NULL; xd->eoi_mmio = NULL; } if (xd->trig_mmio) { + unmap_kernel_range((unsigned long)xd->trig_mmio, + 1u << xd->esb_shift); iounmap(xd->trig_mmio); xd->trig_mmio = NULL; } -- cgit v1.2.3-59-g8ed1b From 0755e85570a4615ca674ad6489d44d63916f1f3e Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Wed, 29 Apr 2020 09:51:22 +0200 Subject: powerpc/xive: Do not expose a debugfs file when XIVE is disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The XIVE interrupt mode can be disabled with the "xive=off" kernel parameter, in which case there is nothing to present to the user in the associated /sys/kernel/debug/powerpc/xive file. Fixes: 930914b7d528 ("powerpc/xive: Add a debugfs file to dump internal XIVE state") Signed-off-by: Cédric Le Goater Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200429075122.1216388-4-clg@kaod.org --- arch/powerpc/sysdev/xive/common.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 3dbc94cb4380..f591be9f01f4 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -1664,7 +1664,8 @@ DEFINE_SHOW_ATTRIBUTE(xive_core_debug); int xive_core_debug_init(void) { - debugfs_create_file("xive", 0400, powerpc_debugfs_root, - NULL, &xive_core_debug_fops); + if (xive_enabled()) + debugfs_create_file("xive", 0400, powerpc_debugfs_root, + NULL, &xive_core_debug_fops); return 0; } -- cgit v1.2.3-59-g8ed1b From 0c444d98efad89e2a189d1a5a188e0385edac647 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 2 Oct 2019 14:48:54 -0700 Subject: macintosh/ams-input: switch to using input device polling mode Now that instances of input_dev support polling mode natively, we no longer need to create input_polled_dev instance. 
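For drivers making the same conversion, the new-style API boils down to the sketch below (a hypothetical minimal driver, not the ams code; the poll handler now takes the input_dev directly):

  #include <linux/input.h>

  static void demo_poll(struct input_dev *dev)
  {
          /* read the hardware here, then report */
          input_report_abs(dev, ABS_X, 0);
          input_sync(dev);
  }

  static int demo_register(void)
  {
          struct input_dev *input = input_allocate_device();
          int error;

          if (!input)
                  return -ENOMEM;

          input->name = "demo";
          input_set_abs_params(input, ABS_X, -50, 50, 3, 0);

          error = input_setup_polling(input, demo_poll);
          if (error)
                  goto err_free;
          input_set_poll_interval(input, 25);

          error = input_register_device(input);
          if (error)
                  goto err_free;
          return 0;

  err_free:
          input_free_device(input);
          return error;
  }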
Signed-off-by: Dmitry Torokhov Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20191002214854.GA114387@dtor-ws --- drivers/macintosh/Kconfig | 1 - drivers/macintosh/ams/ams-input.c | 37 ++++++++++++++++++------------------- drivers/macintosh/ams/ams.h | 4 ++-- 3 files changed, 20 insertions(+), 22 deletions(-) diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig index cbd46c1c5bf7..fcb9d7bd5bd0 100644 --- a/drivers/macintosh/Kconfig +++ b/drivers/macintosh/Kconfig @@ -247,7 +247,6 @@ config PMAC_RACKMETER config SENSORS_AMS tristate "Apple Motion Sensor driver" depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C) - select INPUT_POLLDEV help Support for the motion sensor included in PowerBooks. Includes implementations for PMU and I2C. diff --git a/drivers/macintosh/ams/ams-input.c b/drivers/macintosh/ams/ams-input.c index 06a96b3f11de..0da493d449b2 100644 --- a/drivers/macintosh/ams/ams-input.c +++ b/drivers/macintosh/ams/ams-input.c @@ -25,9 +25,8 @@ MODULE_PARM_DESC(invert, "Invert input data on X and Y axis"); static DEFINE_MUTEX(ams_input_mutex); -static void ams_idev_poll(struct input_polled_dev *dev) +static void ams_idev_poll(struct input_dev *idev) { - struct input_dev *idev = dev->input; s8 x, y, z; mutex_lock(&ams_info.lock); @@ -59,14 +58,10 @@ static int ams_input_enable(void) ams_info.ycalib = y; ams_info.zcalib = z; - ams_info.idev = input_allocate_polled_device(); - if (!ams_info.idev) + input = input_allocate_device(); + if (!input) return -ENOMEM; - ams_info.idev->poll = ams_idev_poll; - ams_info.idev->poll_interval = 25; - - input = ams_info.idev->input; input->name = "Apple Motion Sensor"; input->id.bustype = ams_info.bustype; input->id.vendor = 0; @@ -75,28 +70,32 @@ static int ams_input_enable(void) input_set_abs_params(input, ABS_X, -50, 50, 3, 0); input_set_abs_params(input, ABS_Y, -50, 50, 3, 0); input_set_abs_params(input, ABS_Z, -50, 50, 3, 0); + input_set_capability(input, EV_KEY, BTN_TOUCH); - set_bit(EV_ABS, input->evbit); - set_bit(EV_KEY, input->evbit); - set_bit(BTN_TOUCH, input->keybit); + error = input_setup_polling(input, ams_idev_poll); + if (error) + goto err_free_input; - error = input_register_polled_device(ams_info.idev); - if (error) { - input_free_polled_device(ams_info.idev); - ams_info.idev = NULL; - return error; - } + input_set_poll_interval(input, 25); + error = input_register_device(input); + if (error) + goto err_free_input; + + ams_info.idev = input; joystick = true; return 0; + +err_free_input: + input_free_device(input); + return error; } static void ams_input_disable(void) { if (ams_info.idev) { - input_unregister_polled_device(ams_info.idev); - input_free_polled_device(ams_info.idev); + input_unregister_device(ams_info.idev); ams_info.idev = NULL; } diff --git a/drivers/macintosh/ams/ams.h b/drivers/macintosh/ams/ams.h index fe8d596f9845..935bdd9cd9a6 100644 --- a/drivers/macintosh/ams/ams.h +++ b/drivers/macintosh/ams/ams.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include -#include +#include #include #include #include @@ -51,7 +51,7 @@ struct ams { #endif /* Joystick emulation */ - struct input_polled_dev *idev; + struct input_dev *idev; __u16 bustype; /* calibrated null values */ -- cgit v1.2.3-59-g8ed1b From e4f4ffa8a98c24a4ab482669b1e2b4cfce3f52f4 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Mon, 18 May 2020 11:10:43 -0700 Subject: input: i8042 - Remove special PowerPC handling This causes a build error with CONFIG_WALNUT because kb_cs and 
kb_data were removed in commit 917f0af9e5a9 ("powerpc: Remove arch/ppc and include/asm-ppc"). ld.lld: error: undefined symbol: kb_cs > referenced by i8042-ppcio.h:28 (drivers/input/serio/i8042-ppcio.h:28) > input/serio/i8042.o:(__i8042_command) in archive drivers/built-in.a > referenced by i8042-ppcio.h:28 (drivers/input/serio/i8042-ppcio.h:28) > input/serio/i8042.o:(__i8042_command) in archive drivers/built-in.a > referenced by i8042-ppcio.h:28 (drivers/input/serio/i8042-ppcio.h:28) > input/serio/i8042.o:(__i8042_command) in archive drivers/built-in.a ld.lld: error: undefined symbol: kb_data > referenced by i8042.c:309 (drivers/input/serio/i8042.c:309) > input/serio/i8042.o:(__i8042_command) in archive drivers/built-in.a > referenced by i8042-ppcio.h:33 (drivers/input/serio/i8042-ppcio.h:33) > input/serio/i8042.o:(__i8042_command) in archive drivers/built-in.a > referenced by i8042.c:319 (drivers/input/serio/i8042.c:319) > input/serio/i8042.o:(__i8042_command) in archive drivers/built-in.a > referenced 15 more times Presumably since nobody has noticed this for the last 12 years, there is not anyone actually trying to use this driver so we can just remove this special walnut code and use the generic header so it builds for all configurations. Fixes: 917f0af9e5a9 ("powerpc: Remove arch/ppc and include/asm-ppc") Reported-by: kbuild test robot Signed-off-by: Nathan Chancellor Signed-off-by: Michael Ellerman Acked-by: Dmitry Torokhov Link: https://lore.kernel.org/r/20200518181043.3363953-1-natechancellor@gmail.com --- drivers/input/serio/i8042-ppcio.h | 57 --------------------------------------- drivers/input/serio/i8042.h | 2 -- 2 files changed, 59 deletions(-) delete mode 100644 drivers/input/serio/i8042-ppcio.h diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h deleted file mode 100644 index 391f94d9e47d..000000000000 --- a/drivers/input/serio/i8042-ppcio.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _I8042_PPCIO_H -#define _I8042_PPCIO_H - - -#if defined(CONFIG_WALNUT) - -#define I8042_KBD_IRQ 25 -#define I8042_AUX_IRQ 26 - -#define I8042_KBD_PHYS_DESC "walnutps2/serio0" -#define I8042_AUX_PHYS_DESC "walnutps2/serio1" -#define I8042_MUX_PHYS_DESC "walnutps2/serio%d" - -extern void *kb_cs; -extern void *kb_data; - -#define I8042_COMMAND_REG (*(int *)kb_cs) -#define I8042_DATA_REG (*(int *)kb_data) - -static inline int i8042_read_data(void) -{ - return readb(kb_data); -} - -static inline int i8042_read_status(void) -{ - return readb(kb_cs); -} - -static inline void i8042_write_data(int val) -{ - writeb(val, kb_data); -} - -static inline void i8042_write_command(int val) -{ - writeb(val, kb_cs); -} - -static inline int i8042_platform_init(void) -{ - i8042_reset = I8042_RESET_ALWAYS; - return 0; -} - -static inline void i8042_platform_exit(void) -{ -} - -#else - -#include "i8042-io.h" - -#endif - -#endif /* _I8042_PPCIO_H */ diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h index 38dc27ad3c18..eb376700dfff 100644 --- a/drivers/input/serio/i8042.h +++ b/drivers/input/serio/i8042.h @@ -17,8 +17,6 @@ #include "i8042-ip22io.h" #elif defined(CONFIG_SNI_RM) #include "i8042-snirm.h" -#elif defined(CONFIG_PPC) -#include "i8042-ppcio.h" #elif defined(CONFIG_SPARC) #include "i8042-sparcio.h" #elif defined(CONFIG_X86) || defined(CONFIG_IA64) -- cgit v1.2.3-59-g8ed1b From 18594f9b8c45484bd527ebc6b08383b95f58ba73 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 4 May 2020 22:29:07 +1000 Subject: 
powerpc/64s/radix: Don't prefetch DAR in update_mmu_cache The idea behind this prefetch was to kick off a page table walk before returning from the fault, getting some pipelining advantage. But this never showed up any noticable performance advantage, and in fact with KUAP the prefetches are actually blocked and cause some kind of micro-architectural fault. Removing this improves page fault microbenchmark performance by about 9%. Signed-off-by: Nicholas Piggin [mpe: Keep the early return in update_mmu_cache()] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200504122907.49304-1-npiggin@gmail.com --- arch/powerpc/include/asm/book3s/64/pgtable.h | 7 +++++-- arch/powerpc/mm/book3s64/hash_utils.c | 4 +--- arch/powerpc/mm/book3s64/pgtable.c | 13 ------------- 3 files changed, 6 insertions(+), 18 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index ec17fc343be0..c4b77fa0b9ad 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1159,8 +1159,11 @@ extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd); -extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd); +static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmd) +{ +} + extern int hash__has_transparent_hugepage(void); static inline int has_transparent_hugepage(void) { diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 622c6e8e9fa6..0124003e60d0 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1634,10 +1634,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, unsigned long trap; bool is_exec; - if (radix_enabled()) { - prefetch((void *)address); + if (radix_enabled()) return; - } /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ if (!pte_young(*ptep) || address >= TASK_SIZE) diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 54b6d6d103ea..c58ad1049909 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -156,19 +156,6 @@ pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmdv &= _HPAGE_CHG_MASK; return pmd_set_protbits(__pmd(pmdv), newprot); } - -/* - * This is called at the end of handling a user page fault, when the - * fault has been handled by updating a HUGE PMD entry in the linux page tables. - * We use it to preload an HPTE into the hash table corresponding to - * the updated linux HUGE PMD entry. - */ -void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd) -{ - if (radix_enabled()) - prefetch((void *)addr); -} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* For use by kexec */ -- cgit v1.2.3-59-g8ed1b From 0bdad33d6bd7b80722e2f9e588d3d7c6d6e34978 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 29 Apr 2020 16:24:21 +1000 Subject: powerpc/64: Refactor interrupt exit irq disabling sequence The same complicated sequence for juggling EE, RI, soft mask, and irq tracing is repeated 3 times, tidy these up into one function. This differs qiute a bit between sub architectures, so this makes the ppc32 port cleaner as well. 
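The calling convention that makes the consolidation possible: the helper hard-disables EE/RI, and if an interrupt became pending it restores its state and returns false, so every exit path reduces to the same retry loop. A simplified caller skeleton (do_exit_work() is a hypothetical stand-in for the signal/reschedule checks):

  #include <linux/irqflags.h>
  #include <linux/types.h>

  bool prep_irq_for_enabled_exit(void);   /* the helper added below */
  void do_exit_work(void);                /* hypothetical stand-in */

  static void example_exit_path(void)
  {
  again:
          do_exit_work();
          if (!prep_irq_for_enabled_exit()) {
                  local_irq_enable();     /* let the pending interrupt run */
                  local_irq_disable();
                  goto again;             /* then redo the exit-work checks */
          }
  }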
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200429062421.1675400-1-npiggin@gmail.com --- arch/powerpc/kernel/syscall_64.c | 58 +++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c index 7b7c89cad901..613da0d0fa8c 100644 --- a/arch/powerpc/kernel/syscall_64.c +++ b/arch/powerpc/kernel/syscall_64.c @@ -101,6 +101,31 @@ notrace long system_call_exception(long r3, long r4, long r5, return f(r3, r4, r5, r6, r7, r8); } +/* + * local irqs must be disabled. Returns false if the caller must re-enable + * them, check for new work, and try again. + */ +static notrace inline bool prep_irq_for_enabled_exit(void) +{ + /* This must be done with RI=1 because tracing may touch vmaps */ + trace_hardirqs_on(); + + /* This pattern matches prep_irq_for_idle */ + __hard_EE_RI_disable(); + if (unlikely(lazy_irq_pending_nocheck())) { + /* Took an interrupt, may have more exit work to do. */ + __hard_RI_enable(); + trace_hardirqs_off(); + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + + return false; + } + local_paca->irq_happened = 0; + irq_soft_mask_set(IRQS_ENABLED); + + return true; +} + /* * This should be called after a syscall returns, with r3 the return value * from the syscall. If this function returns non-zero, the system call @@ -186,21 +211,10 @@ again: } } - /* This must be done with RI=1 because tracing may touch vmaps */ - trace_hardirqs_on(); - - /* This pattern matches prep_irq_for_idle */ - __hard_EE_RI_disable(); - if (unlikely(lazy_irq_pending_nocheck())) { - __hard_RI_enable(); - trace_hardirqs_off(); - local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + if (unlikely(!prep_irq_for_enabled_exit())) { local_irq_enable(); - /* Took an interrupt, may have more exit work to do. */ goto again; } - local_paca->irq_happened = 0; - irq_soft_mask_set(IRQS_ENABLED); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM local_paca->tm_scratch = regs->msr; @@ -264,19 +278,11 @@ again: } } - trace_hardirqs_on(); - __hard_EE_RI_disable(); - if (unlikely(lazy_irq_pending_nocheck())) { - __hard_RI_enable(); - trace_hardirqs_off(); - local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + if (unlikely(!prep_irq_for_enabled_exit())) { local_irq_enable(); local_irq_disable(); - /* Took an interrupt, may have more exit work to do. */ goto again; } - local_paca->irq_happened = 0; - irq_soft_mask_set(IRQS_ENABLED); #ifdef CONFIG_PPC_BOOK3E if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) { @@ -334,13 +340,7 @@ again: } } - trace_hardirqs_on(); - __hard_EE_RI_disable(); - if (unlikely(lazy_irq_pending_nocheck())) { - __hard_RI_enable(); - irq_soft_mask_set(IRQS_ALL_DISABLED); - trace_hardirqs_off(); - local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + if (unlikely(!prep_irq_for_enabled_exit())) { /* * Can't local_irq_restore to replay if we were in * interrupt context. Must replay directly. @@ -354,8 +354,6 @@ again: /* Took an interrupt, may have more exit work to do. */ goto again; } - local_paca->irq_happened = 0; - irq_soft_mask_set(IRQS_ENABLED); } else { /* Returning to a kernel context with local irqs disabled. */ __hard_EE_RI_disable(); -- cgit v1.2.3-59-g8ed1b From 7ade8495dcfd788a76e6877c9ea86f5207369ea4 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 21 May 2020 16:55:52 +0000 Subject: powerpc: Remove Xilinx PPC405/PPC440 support The latest Xilinx design tools called ISE and EDK has been released in October 2013. 
New tool doesn't support any PPC405/PPC440 new designs. These platforms are no longer supported and tested. PowerPC 405/440 port is orphan from 2013 by commit cdeb89943bfc ("MAINTAINERS: Fix incorrect status tag") and commit 19624236cce1 ("MAINTAINERS: Update Grant's email address and maintainership") that's why it is time to remove the support fot these platforms. Signed-off-by: Michal Simek Signed-off-by: Christophe Leroy Acked-by: Arnd Bergmann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/8c593895e2cb57d232d85ce4d8c3a1aa7f0869cc.1590079968.git.christophe.leroy@csgroup.eu --- Documentation/devicetree/bindings/xilinx.txt | 143 -------- Documentation/powerpc/bootwrapper.rst | 28 +- arch/powerpc/Kconfig.debug | 2 +- arch/powerpc/boot/Makefile | 7 +- arch/powerpc/boot/dts/Makefile | 1 - arch/powerpc/boot/dts/virtex440-ml507.dts | 406 ----------------------- arch/powerpc/boot/dts/virtex440-ml510.dts | 466 --------------------------- arch/powerpc/boot/ops.h | 1 - arch/powerpc/boot/serial.c | 5 - arch/powerpc/boot/uartlite.c | 79 ----- arch/powerpc/boot/virtex.c | 97 ------ arch/powerpc/boot/virtex405-head.S | 31 -- arch/powerpc/boot/wrapper | 8 - arch/powerpc/configs/40x/virtex_defconfig | 75 ----- arch/powerpc/configs/44x/virtex5_defconfig | 74 ----- arch/powerpc/configs/ppc40x_defconfig | 8 - arch/powerpc/configs/ppc44x_defconfig | 8 - arch/powerpc/include/asm/xilinx_intc.h | 16 - arch/powerpc/include/asm/xilinx_pci.h | 21 -- arch/powerpc/kernel/cputable.c | 39 --- arch/powerpc/platforms/40x/Kconfig | 31 -- arch/powerpc/platforms/40x/Makefile | 1 - arch/powerpc/platforms/40x/virtex.c | 54 ---- arch/powerpc/platforms/44x/Kconfig | 37 --- arch/powerpc/platforms/44x/Makefile | 2 - arch/powerpc/platforms/44x/virtex.c | 60 ---- arch/powerpc/platforms/44x/virtex_ml510.c | 30 -- arch/powerpc/platforms/Kconfig | 4 - arch/powerpc/sysdev/Makefile | 2 - arch/powerpc/sysdev/xilinx_intc.c | 88 ----- arch/powerpc/sysdev/xilinx_pci.c | 132 -------- drivers/char/Kconfig | 2 +- drivers/video/fbdev/Kconfig | 2 +- 33 files changed, 7 insertions(+), 1953 deletions(-) delete mode 100644 arch/powerpc/boot/dts/virtex440-ml507.dts delete mode 100644 arch/powerpc/boot/dts/virtex440-ml510.dts delete mode 100644 arch/powerpc/boot/uartlite.c delete mode 100644 arch/powerpc/boot/virtex.c delete mode 100644 arch/powerpc/boot/virtex405-head.S delete mode 100644 arch/powerpc/configs/40x/virtex_defconfig delete mode 100644 arch/powerpc/configs/44x/virtex5_defconfig delete mode 100644 arch/powerpc/include/asm/xilinx_intc.h delete mode 100644 arch/powerpc/include/asm/xilinx_pci.h delete mode 100644 arch/powerpc/platforms/40x/virtex.c delete mode 100644 arch/powerpc/platforms/44x/virtex.c delete mode 100644 arch/powerpc/platforms/44x/virtex_ml510.c delete mode 100644 arch/powerpc/sysdev/xilinx_intc.c delete mode 100644 arch/powerpc/sysdev/xilinx_pci.c diff --git a/Documentation/devicetree/bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt index d058ace29345..28199b31fe5e 100644 --- a/Documentation/devicetree/bindings/xilinx.txt +++ b/Documentation/devicetree/bindings/xilinx.txt @@ -86,149 +86,6 @@ xlnx,use-parity = <0>; }; - Some IP cores actually implement 2 or more logical devices. In - this case, the device should still describe the whole IP core with - a single node and add a child node for each logical device. The - ranges property can be used to translate from parent IP-core to the - registers of each device. 
In addition, the parent node should be - compatible with the bus type 'xlnx,compound', and should contain - #address-cells and #size-cells, as with any other bus. (Note: this - makes the assumption that both logical devices have the same bus - binding. If this is not true, then separate nodes should be used - for each logical device). The 'cell-index' property can be used to - enumerate logical devices within an IP core. For example, the - following is the system.mhs entry for the dual ps2 controller found - on the ml403 reference design. - - BEGIN opb_ps2_dual_ref - PARAMETER INSTANCE = opb_ps2_dual_ref_0 - PARAMETER HW_VER = 1.00.a - PARAMETER C_BASEADDR = 0xA9000000 - PARAMETER C_HIGHADDR = 0xA9001FFF - BUS_INTERFACE SOPB = opb_v20_0 - PORT Sys_Intr1 = ps2_1_intr - PORT Sys_Intr2 = ps2_2_intr - PORT Clkin1 = ps2_clk_rx_1 - PORT Clkin2 = ps2_clk_rx_2 - PORT Clkpd1 = ps2_clk_tx_1 - PORT Clkpd2 = ps2_clk_tx_2 - PORT Rx1 = ps2_d_rx_1 - PORT Rx2 = ps2_d_rx_2 - PORT Txpd1 = ps2_d_tx_1 - PORT Txpd2 = ps2_d_tx_2 - END - - It would result in the following device tree nodes: - - opb_ps2_dual_ref_0: opb-ps2-dual-ref@a9000000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "xlnx,compound"; - ranges = <0 a9000000 2000>; - // If this device had extra parameters, then they would - // go here. - ps2@0 { - compatible = "xlnx,opb-ps2-dual-ref-1.00.a"; - reg = <0 40>; - interrupt-parent = <&opb_intc_0>; - interrupts = <3 0>; - cell-index = <0>; - }; - ps2@1000 { - compatible = "xlnx,opb-ps2-dual-ref-1.00.a"; - reg = <1000 40>; - interrupt-parent = <&opb_intc_0>; - interrupts = <3 0>; - cell-index = <0>; - }; - }; - - Also, the system.mhs file defines bus attachments from the processor - to the devices. The device tree structure should reflect the bus - attachments. 
Again an example; this system.mhs fragment: - - BEGIN ppc405_virtex4 - PARAMETER INSTANCE = ppc405_0 - PARAMETER HW_VER = 1.01.a - BUS_INTERFACE DPLB = plb_v34_0 - BUS_INTERFACE IPLB = plb_v34_0 - END - - BEGIN opb_intc - PARAMETER INSTANCE = opb_intc_0 - PARAMETER HW_VER = 1.00.c - PARAMETER C_BASEADDR = 0xD1000FC0 - PARAMETER C_HIGHADDR = 0xD1000FDF - BUS_INTERFACE SOPB = opb_v20_0 - END - - BEGIN opb_uart16550 - PARAMETER INSTANCE = opb_uart16550_0 - PARAMETER HW_VER = 1.00.d - PARAMETER C_BASEADDR = 0xa0000000 - PARAMETER C_HIGHADDR = 0xa0001FFF - BUS_INTERFACE SOPB = opb_v20_0 - END - - BEGIN plb_v34 - PARAMETER INSTANCE = plb_v34_0 - PARAMETER HW_VER = 1.02.a - END - - BEGIN plb_bram_if_cntlr - PARAMETER INSTANCE = plb_bram_if_cntlr_0 - PARAMETER HW_VER = 1.00.b - PARAMETER C_BASEADDR = 0xFFFF0000 - PARAMETER C_HIGHADDR = 0xFFFFFFFF - BUS_INTERFACE SPLB = plb_v34_0 - END - - BEGIN plb2opb_bridge - PARAMETER INSTANCE = plb2opb_bridge_0 - PARAMETER HW_VER = 1.01.a - PARAMETER C_RNG0_BASEADDR = 0x20000000 - PARAMETER C_RNG0_HIGHADDR = 0x3FFFFFFF - PARAMETER C_RNG1_BASEADDR = 0x60000000 - PARAMETER C_RNG1_HIGHADDR = 0x7FFFFFFF - PARAMETER C_RNG2_BASEADDR = 0x80000000 - PARAMETER C_RNG2_HIGHADDR = 0xBFFFFFFF - PARAMETER C_RNG3_BASEADDR = 0xC0000000 - PARAMETER C_RNG3_HIGHADDR = 0xDFFFFFFF - BUS_INTERFACE SPLB = plb_v34_0 - BUS_INTERFACE MOPB = opb_v20_0 - END - - Gives this device tree (some properties removed for clarity): - - plb@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "xlnx,plb-v34-1.02.a"; - device_type = "ibm,plb"; - ranges; // 1:1 translation - - plb_bram_if_cntrl_0: bram@ffff0000 { - reg = ; - } - - opb@20000000 { - #address-cells = <1>; - #size-cells = <1>; - ranges = <20000000 20000000 20000000 - 60000000 60000000 20000000 - 80000000 80000000 40000000 - c0000000 c0000000 20000000>; - - opb_uart16550_0: serial@a0000000 { - reg = ; - }; - - opb_intc_0: interrupt-controller@d1000fc0 { - reg = ; - }; - }; - }; - That covers the general approach to binding xilinx IP cores into the device tree. The following are bindings for specific devices: diff --git a/Documentation/powerpc/bootwrapper.rst b/Documentation/powerpc/bootwrapper.rst index a6292afba573..cdfa2bc8425f 100644 --- a/Documentation/powerpc/bootwrapper.rst +++ b/Documentation/powerpc/bootwrapper.rst @@ -70,28 +70,6 @@ Currently, the following image format targets exist: kernel with this image type and it depends entirely on the embedded device tree for all information. - The simpleImage is useful for booting systems with - an unknown firmware interface or for booting from - a debugger when no firmware is present (such as on - the Xilinx Virtex platform). The only assumption that - simpleImage makes is that RAM is correctly initialized - and that the MMU is either off or has RAM mapped to - base address 0. - - simpleImage also supports inserting special platform - specific initialization code to the start of the bootup - sequence. The virtex405 platform uses this feature to - ensure that the cache is invalidated before caching - is enabled. Platform specific initialization code is - added as part of the wrapper script and is keyed on - the image target name. For example, all - simpleImage.virtex405-* targets will add the - virtex405-head.S initialization code (This also means - that the dts file for virtex405 targets should be - named (virtex405-.dts). Search the wrapper - script for 'virtex405' and see the file - arch/powerpc/boot/virtex405-head.S for details. 
-
 
 	treeImage.%;	Image format used with OpenBIOS firmware found
 			on some ppc4xx hardware.  This image embeds a device
 			tree blob inside the image.
 
@@ -116,10 +94,8 @@ Image types which embed a device tree blob (simpleImage, dtbImage,
 treeImage, and cuImage) all generate the device tree blob from a file in the
 arch/powerpc/boot/dts/ directory. The Makefile selects the correct device
 tree source based on the name of the target. Therefore, if the kernel is
-built with 'make treeImage.walnut simpleImage.virtex405-ml403', then the
-build system will use arch/powerpc/boot/dts/walnut.dts to build
-treeImage.walnut and arch/powerpc/boot/dts/virtex405-ml403.dts to build
-the simpleImage.virtex405-ml403.
+built with 'make treeImage.walnut', then the build system will use
+arch/powerpc/boot/dts/walnut.dts to build treeImage.walnut.
 
 Two special targets called 'zImage' and 'zImage.initrd' also exist. These
 targets build all the default images as selected by the kernel configuration.

diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 0b063830eea8..b88900f4832f 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -230,7 +230,7 @@ config PPC_EARLY_DEBUG_40x
 	help
 	  Select this to enable early debugging for IBM 40x chips via the
 	  inbuilt serial port. This works on chips with a 16550 compatible
-	  UART. Xilinx chips with uartlite cannot use this option.
+	  UART.
 
 config PPC_EARLY_DEBUG_CPM
 	bool "Early serial debugging for Freescale CPM-based serial ports"

diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index c53a1b8bba8b..d8077b7071dd 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -79,7 +79,6 @@ $(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
 $(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
 $(obj)/treeboot-currituck.o: BOOTCFLAGS += -mcpu=405
 $(obj)/treeboot-akebono.o: BOOTCFLAGS += -mcpu=405
-$(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
 
 # The pre-boot decompressors pull in a lot of kernel headers and other source
 # files.
This creates a bit of a dependency headache since we need to copy @@ -129,14 +128,12 @@ src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c src-wlib-$(CONFIG_PPC_8xx) += mpc8xx.c planetcore.c fsl-soc.c src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c src-wlib-$(CONFIG_EMBEDDED6xx) += ugecon.c fsl-soc.c -src-wlib-$(CONFIG_XILINX_VIRTEX) += uartlite.c src-wlib-$(CONFIG_CPM) += cpm-serial.c src-plat-y := of.c epapr.c src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \ treeboot-walnut.c cuboot-acadia.c \ - cuboot-kilauea.c simpleboot.c \ - virtex405-head.S virtex.c + cuboot-kilauea.c simpleboot.c src-plat-$(CONFIG_44x) += treeboot-ebony.c cuboot-ebony.c treeboot-bamboo.c \ cuboot-bamboo.c cuboot-sam440ep.c \ cuboot-sequoia.c cuboot-rainier.c \ @@ -144,7 +141,7 @@ src-plat-$(CONFIG_44x) += treeboot-ebony.c cuboot-ebony.c treeboot-bamboo.c \ cuboot-warp.c cuboot-yosemite.c \ treeboot-iss4xx.c treeboot-currituck.c \ treeboot-akebono.c \ - simpleboot.c fixed-head.S virtex.c + simpleboot.c fixed-head.S src-plat-$(CONFIG_PPC_8xx) += cuboot-8xx.c fixed-head.S ep88xc.c redboot-8xx.c src-plat-$(CONFIG_PPC_MPC52xx) += cuboot-52xx.c src-plat-$(CONFIG_PPC_82xx) += cuboot-pq2.c fixed-head.S ep8248e.c cuboot-824x.c diff --git a/arch/powerpc/boot/dts/Makefile b/arch/powerpc/boot/dts/Makefile index 1cbc0e4ce857..fb335d05aae8 100644 --- a/arch/powerpc/boot/dts/Makefile +++ b/arch/powerpc/boot/dts/Makefile @@ -4,4 +4,3 @@ subdir-y += fsl dtstree := $(srctree)/$(src) dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) -dtb-$(CONFIG_XILINX_VIRTEX440_GENERIC_BOARD) += virtex440-ml507.dtb virtex440-ml510.dtb diff --git a/arch/powerpc/boot/dts/virtex440-ml507.dts b/arch/powerpc/boot/dts/virtex440-ml507.dts deleted file mode 100644 index 66f1c6312de6..000000000000 --- a/arch/powerpc/boot/dts/virtex440-ml507.dts +++ /dev/null @@ -1,406 +0,0 @@ -/* - * This file supports the Xilinx ML507 board with the 440 processor. - * A reference design for the FPGA is provided at http://git.xilinx.com. - * - * (C) Copyright 2008 Xilinx, Inc. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - * - * --- - * - * Device Tree Generator version: 1.1 - * - * CAUTION: This file is automatically generated by libgen. 
- * Version: Xilinx EDK 10.1.03 EDK_K_SP3.6 - * - * XPS project directory: ml507_ppc440_emb_ref - */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - compatible = "xlnx,virtex440"; - dcr-parent = <&ppc440_0>; - model = "testing"; - DDR2_SDRAM: memory@0 { - device_type = "memory"; - reg = < 0 0x10000000 >; - } ; - chosen { - bootargs = "console=ttyS0 root=/dev/ram"; - stdout-path = &RS232_Uart_1; - } ; - cpus { - #address-cells = <1>; - #cpus = <1>; - #size-cells = <0>; - ppc440_0: cpu@0 { - clock-frequency = <400000000>; - compatible = "PowerPC,440", "ibm,ppc440"; - d-cache-line-size = <0x20>; - d-cache-size = <0x8000>; - dcr-access-method = "native"; - dcr-controller ; - device_type = "cpu"; - i-cache-line-size = <0x20>; - i-cache-size = <0x8000>; - model = "PowerPC,440"; - reg = <0>; - timebase-frequency = <400000000>; - xlnx,apu-control = <1>; - xlnx,apu-udi-0 = <0>; - xlnx,apu-udi-1 = <0>; - xlnx,apu-udi-10 = <0>; - xlnx,apu-udi-11 = <0>; - xlnx,apu-udi-12 = <0>; - xlnx,apu-udi-13 = <0>; - xlnx,apu-udi-14 = <0>; - xlnx,apu-udi-15 = <0>; - xlnx,apu-udi-2 = <0>; - xlnx,apu-udi-3 = <0>; - xlnx,apu-udi-4 = <0>; - xlnx,apu-udi-5 = <0>; - xlnx,apu-udi-6 = <0>; - xlnx,apu-udi-7 = <0>; - xlnx,apu-udi-8 = <0>; - xlnx,apu-udi-9 = <0>; - xlnx,dcr-autolock-enable = <1>; - xlnx,dcu-rd-ld-cache-plb-prio = <0>; - xlnx,dcu-rd-noncache-plb-prio = <0>; - xlnx,dcu-rd-touch-plb-prio = <0>; - xlnx,dcu-rd-urgent-plb-prio = <0>; - xlnx,dcu-wr-flush-plb-prio = <0>; - xlnx,dcu-wr-store-plb-prio = <0>; - xlnx,dcu-wr-urgent-plb-prio = <0>; - xlnx,dma0-control = <0>; - xlnx,dma0-plb-prio = <0>; - xlnx,dma0-rxchannelctrl = <0x1010000>; - xlnx,dma0-rxirqtimer = <0x3ff>; - xlnx,dma0-txchannelctrl = <0x1010000>; - xlnx,dma0-txirqtimer = <0x3ff>; - xlnx,dma1-control = <0>; - xlnx,dma1-plb-prio = <0>; - xlnx,dma1-rxchannelctrl = <0x1010000>; - xlnx,dma1-rxirqtimer = <0x3ff>; - xlnx,dma1-txchannelctrl = <0x1010000>; - xlnx,dma1-txirqtimer = <0x3ff>; - xlnx,dma2-control = <0>; - xlnx,dma2-plb-prio = <0>; - xlnx,dma2-rxchannelctrl = <0x1010000>; - xlnx,dma2-rxirqtimer = <0x3ff>; - xlnx,dma2-txchannelctrl = <0x1010000>; - xlnx,dma2-txirqtimer = <0x3ff>; - xlnx,dma3-control = <0>; - xlnx,dma3-plb-prio = <0>; - xlnx,dma3-rxchannelctrl = <0x1010000>; - xlnx,dma3-rxirqtimer = <0x3ff>; - xlnx,dma3-txchannelctrl = <0x1010000>; - xlnx,dma3-txirqtimer = <0x3ff>; - xlnx,endian-reset = <0>; - xlnx,generate-plb-timespecs = <1>; - xlnx,icu-rd-fetch-plb-prio = <0>; - xlnx,icu-rd-spec-plb-prio = <0>; - xlnx,icu-rd-touch-plb-prio = <0>; - xlnx,interconnect-imask = <0xffffffff>; - xlnx,mplb-allow-lock-xfer = <1>; - xlnx,mplb-arb-mode = <0>; - xlnx,mplb-awidth = <0x20>; - xlnx,mplb-counter = <0x500>; - xlnx,mplb-dwidth = <0x80>; - xlnx,mplb-max-burst = <8>; - xlnx,mplb-native-dwidth = <0x80>; - xlnx,mplb-p2p = <0>; - xlnx,mplb-prio-dcur = <2>; - xlnx,mplb-prio-dcuw = <3>; - xlnx,mplb-prio-icu = <4>; - xlnx,mplb-prio-splb0 = <1>; - xlnx,mplb-prio-splb1 = <0>; - xlnx,mplb-read-pipe-enable = <1>; - xlnx,mplb-sync-tattribute = <0>; - xlnx,mplb-wdog-enable = <1>; - xlnx,mplb-write-pipe-enable = <1>; - xlnx,mplb-write-post-enable = <1>; - xlnx,num-dma = <1>; - xlnx,pir = <0xf>; - xlnx,ppc440mc-addr-base = <0>; - xlnx,ppc440mc-addr-high = <0xfffffff>; - xlnx,ppc440mc-arb-mode = <0>; - xlnx,ppc440mc-bank-conflict-mask = <0xc00000>; - xlnx,ppc440mc-control = <0xf810008f>; - xlnx,ppc440mc-max-burst = <8>; - xlnx,ppc440mc-prio-dcur = <2>; - xlnx,ppc440mc-prio-dcuw = <3>; - xlnx,ppc440mc-prio-icu = <4>; - xlnx,ppc440mc-prio-splb0 = 
<1>; - xlnx,ppc440mc-prio-splb1 = <0>; - xlnx,ppc440mc-row-conflict-mask = <0x3ffe00>; - xlnx,ppcdm-asyncmode = <0>; - xlnx,ppcds-asyncmode = <0>; - xlnx,user-reset = <0>; - DMA0: sdma@80 { - compatible = "xlnx,ll-dma-1.00.a"; - dcr-reg = < 0x80 0x11 >; - interrupt-parent = <&xps_intc_0>; - interrupts = < 10 2 11 2 >; - } ; - } ; - } ; - plb_v46_0: plb@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "xlnx,plb-v46-1.03.a", "simple-bus"; - ranges ; - DIP_Switches_8Bit: gpio@81460000 { - compatible = "xlnx,xps-gpio-1.00.a"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 7 2 >; - reg = < 0x81460000 0x10000 >; - xlnx,all-inputs = <1>; - xlnx,all-inputs-2 = <0>; - xlnx,dout-default = <0>; - xlnx,dout-default-2 = <0>; - xlnx,family = "virtex5"; - xlnx,gpio-width = <8>; - xlnx,interrupt-present = <1>; - xlnx,is-bidir = <1>; - xlnx,is-bidir-2 = <1>; - xlnx,is-dual = <0>; - xlnx,tri-default = <0xffffffff>; - xlnx,tri-default-2 = <0xffffffff>; - } ; - FLASH: flash@fc000000 { - bank-width = <2>; - compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash"; - reg = < 0xfc000000 0x2000000 >; - xlnx,family = "virtex5"; - xlnx,include-datawidth-matching-0 = <0x1>; - xlnx,include-datawidth-matching-1 = <0x0>; - xlnx,include-datawidth-matching-2 = <0x0>; - xlnx,include-datawidth-matching-3 = <0x0>; - xlnx,include-negedge-ioregs = <0x0>; - xlnx,include-plb-ipif = <0x1>; - xlnx,include-wrbuf = <0x1>; - xlnx,max-mem-width = <0x10>; - xlnx,mch-native-dwidth = <0x20>; - xlnx,mch-plb-clk-period-ps = <0x2710>; - xlnx,mch-splb-awidth = <0x20>; - xlnx,mch0-accessbuf-depth = <0x10>; - xlnx,mch0-protocol = <0x0>; - xlnx,mch0-rddatabuf-depth = <0x10>; - xlnx,mch1-accessbuf-depth = <0x10>; - xlnx,mch1-protocol = <0x0>; - xlnx,mch1-rddatabuf-depth = <0x10>; - xlnx,mch2-accessbuf-depth = <0x10>; - xlnx,mch2-protocol = <0x0>; - xlnx,mch2-rddatabuf-depth = <0x10>; - xlnx,mch3-accessbuf-depth = <0x10>; - xlnx,mch3-protocol = <0x0>; - xlnx,mch3-rddatabuf-depth = <0x10>; - xlnx,mem0-width = <0x10>; - xlnx,mem1-width = <0x20>; - xlnx,mem2-width = <0x20>; - xlnx,mem3-width = <0x20>; - xlnx,num-banks-mem = <0x1>; - xlnx,num-channels = <0x2>; - xlnx,priority-mode = <0x0>; - xlnx,synch-mem-0 = <0x0>; - xlnx,synch-mem-1 = <0x0>; - xlnx,synch-mem-2 = <0x0>; - xlnx,synch-mem-3 = <0x0>; - xlnx,synch-pipedelay-0 = <0x2>; - xlnx,synch-pipedelay-1 = <0x2>; - xlnx,synch-pipedelay-2 = <0x2>; - xlnx,synch-pipedelay-3 = <0x2>; - xlnx,tavdv-ps-mem-0 = <0x1adb0>; - xlnx,tavdv-ps-mem-1 = <0x3a98>; - xlnx,tavdv-ps-mem-2 = <0x3a98>; - xlnx,tavdv-ps-mem-3 = <0x3a98>; - xlnx,tcedv-ps-mem-0 = <0x1adb0>; - xlnx,tcedv-ps-mem-1 = <0x3a98>; - xlnx,tcedv-ps-mem-2 = <0x3a98>; - xlnx,tcedv-ps-mem-3 = <0x3a98>; - xlnx,thzce-ps-mem-0 = <0x88b8>; - xlnx,thzce-ps-mem-1 = <0x1b58>; - xlnx,thzce-ps-mem-2 = <0x1b58>; - xlnx,thzce-ps-mem-3 = <0x1b58>; - xlnx,thzoe-ps-mem-0 = <0x1b58>; - xlnx,thzoe-ps-mem-1 = <0x1b58>; - xlnx,thzoe-ps-mem-2 = <0x1b58>; - xlnx,thzoe-ps-mem-3 = <0x1b58>; - xlnx,tlzwe-ps-mem-0 = <0x88b8>; - xlnx,tlzwe-ps-mem-1 = <0x0>; - xlnx,tlzwe-ps-mem-2 = <0x0>; - xlnx,tlzwe-ps-mem-3 = <0x0>; - xlnx,twc-ps-mem-0 = <0x2af8>; - xlnx,twc-ps-mem-1 = <0x3a98>; - xlnx,twc-ps-mem-2 = <0x3a98>; - xlnx,twc-ps-mem-3 = <0x3a98>; - xlnx,twp-ps-mem-0 = <0x11170>; - xlnx,twp-ps-mem-1 = <0x2ee0>; - xlnx,twp-ps-mem-2 = <0x2ee0>; - xlnx,twp-ps-mem-3 = <0x2ee0>; - xlnx,xcl0-linesize = <0x4>; - xlnx,xcl0-writexfer = <0x1>; - xlnx,xcl1-linesize = <0x4>; - xlnx,xcl1-writexfer = <0x1>; - xlnx,xcl2-linesize = <0x4>; - xlnx,xcl2-writexfer = <0x1>; - 
xlnx,xcl3-linesize = <0x4>; - xlnx,xcl3-writexfer = <0x1>; - } ; - Hard_Ethernet_MAC: xps-ll-temac@81c00000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "xlnx,compound"; - ethernet@81c00000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "xlnx,xps-ll-temac-1.01.b"; - device_type = "network"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 5 2 >; - llink-connected = <&DMA0>; - local-mac-address = [ 02 00 00 00 00 00 ]; - reg = < 0x81c00000 0x40 >; - xlnx,bus2core-clk-ratio = <1>; - xlnx,phy-type = <1>; - xlnx,phyaddr = <1>; - xlnx,rxcsum = <1>; - xlnx,rxfifo = <0x1000>; - xlnx,temac-type = <0>; - xlnx,txcsum = <1>; - xlnx,txfifo = <0x1000>; - phy-handle = <&phy7>; - clock-frequency = <100000000>; - phy7: phy@7 { - compatible = "marvell,88e1111"; - reg = <7>; - } ; - } ; - } ; - IIC_EEPROM: i2c@81600000 { - compatible = "xlnx,xps-iic-2.00.a"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 6 2 >; - reg = < 0x81600000 0x10000 >; - xlnx,clk-freq = <0x5f5e100>; - xlnx,family = "virtex5"; - xlnx,gpo-width = <0x1>; - xlnx,iic-freq = <0x186a0>; - xlnx,scl-inertial-delay = <0x0>; - xlnx,sda-inertial-delay = <0x0>; - xlnx,ten-bit-adr = <0x0>; - } ; - LEDs_8Bit: gpio@81400000 { - compatible = "xlnx,xps-gpio-1.00.a"; - reg = < 0x81400000 0x10000 >; - xlnx,all-inputs = <0>; - xlnx,all-inputs-2 = <0>; - xlnx,dout-default = <0>; - xlnx,dout-default-2 = <0>; - xlnx,family = "virtex5"; - xlnx,gpio-width = <8>; - xlnx,interrupt-present = <0>; - xlnx,is-bidir = <1>; - xlnx,is-bidir-2 = <1>; - xlnx,is-dual = <0>; - xlnx,tri-default = <0xffffffff>; - xlnx,tri-default-2 = <0xffffffff>; - } ; - LEDs_Positions: gpio@81420000 { - compatible = "xlnx,xps-gpio-1.00.a"; - reg = < 0x81420000 0x10000 >; - xlnx,all-inputs = <0>; - xlnx,all-inputs-2 = <0>; - xlnx,dout-default = <0>; - xlnx,dout-default-2 = <0>; - xlnx,family = "virtex5"; - xlnx,gpio-width = <5>; - xlnx,interrupt-present = <0>; - xlnx,is-bidir = <1>; - xlnx,is-bidir-2 = <1>; - xlnx,is-dual = <0>; - xlnx,tri-default = <0xffffffff>; - xlnx,tri-default-2 = <0xffffffff>; - } ; - Push_Buttons_5Bit: gpio@81440000 { - compatible = "xlnx,xps-gpio-1.00.a"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 8 2 >; - reg = < 0x81440000 0x10000 >; - xlnx,all-inputs = <1>; - xlnx,all-inputs-2 = <0>; - xlnx,dout-default = <0>; - xlnx,dout-default-2 = <0>; - xlnx,family = "virtex5"; - xlnx,gpio-width = <5>; - xlnx,interrupt-present = <1>; - xlnx,is-bidir = <1>; - xlnx,is-bidir-2 = <1>; - xlnx,is-dual = <0>; - xlnx,tri-default = <0xffffffff>; - xlnx,tri-default-2 = <0xffffffff>; - } ; - RS232_Uart_1: serial@83e00000 { - clock-frequency = <100000000>; - compatible = "xlnx,xps-uart16550-2.00.b", "ns16550"; - current-speed = <9600>; - device_type = "serial"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 9 2 >; - reg = < 0x83e00000 0x10000 >; - reg-offset = <0x1003>; - reg-shift = <2>; - xlnx,family = "virtex5"; - xlnx,has-external-rclk = <0>; - xlnx,has-external-xin = <0>; - xlnx,is-a-16550 = <1>; - } ; - SysACE_CompactFlash: sysace@83600000 { - compatible = "xlnx,xps-sysace-1.00.a"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 4 2 >; - reg = < 0x83600000 0x10000 >; - xlnx,family = "virtex5"; - xlnx,mem-width = <0x10>; - } ; - xps_bram_if_cntlr_1: xps-bram-if-cntlr@ffff0000 { - compatible = "xlnx,xps-bram-if-cntlr-1.00.a"; - reg = < 0xffff0000 0x10000 >; - xlnx,family = "virtex5"; - } ; - xps_intc_0: interrupt-controller@81800000 { - #interrupt-cells = <2>; - compatible = "xlnx,xps-intc-1.00.a"; - interrupt-controller 
; - reg = < 0x81800000 0x10000 >; - xlnx,num-intr-inputs = <0xc>; - } ; - xps_timebase_wdt_1: xps-timebase-wdt@83a00000 { - compatible = "xlnx,xps-timebase-wdt-1.00.b"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 2 0 1 2 >; - reg = < 0x83a00000 0x10000 >; - xlnx,family = "virtex5"; - xlnx,wdt-enable-once = <0>; - xlnx,wdt-interval = <0x1e>; - } ; - xps_timer_1: timer@83c00000 { - compatible = "xlnx,xps-timer-1.00.a"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 3 2 >; - reg = < 0x83c00000 0x10000 >; - xlnx,count-width = <0x20>; - xlnx,family = "virtex5"; - xlnx,gen0-assert = <1>; - xlnx,gen1-assert = <1>; - xlnx,one-timer-only = <1>; - xlnx,trig0-assert = <1>; - xlnx,trig1-assert = <1>; - } ; - } ; -} ; diff --git a/arch/powerpc/boot/dts/virtex440-ml510.dts b/arch/powerpc/boot/dts/virtex440-ml510.dts deleted file mode 100644 index 3b736ca26ddc..000000000000 --- a/arch/powerpc/boot/dts/virtex440-ml510.dts +++ /dev/null @@ -1,466 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Xilinx ML510 Reference Design support - * - * This DTS file was created for the ml510_bsb1_pcores_ppc440 reference design. - * The reference design contains a bug which prevent PCI DMA from working - * properly. A description of the bug is given in the plbv46_pci section. It - * needs to be fixed by the user until Xilinx updates their reference design. - * - * Copyright 2009, Roderick Colenbrander - */ - -/dts-v1/; -/ { - #address-cells = <1>; - #size-cells = <1>; - compatible = "xlnx,ml510-ref-design", "xlnx,virtex440"; - dcr-parent = <&ppc440_0>; - DDR2_SDRAM_DIMM0: memory@0 { - device_type = "memory"; - reg = < 0x0 0x20000000 >; - } ; - alias { - ethernet0 = &Hard_Ethernet_MAC; - serial0 = &RS232_Uart_1; - } ; - chosen { - bootargs = "console=ttyS0 root=/dev/ram"; - stdout-path = "/plb@0/serial@83e00000"; - } ; - cpus { - #address-cells = <1>; - #cpus = <0x1>; - #size-cells = <0>; - ppc440_0: cpu@0 { - #address-cells = <1>; - #size-cells = <1>; - clock-frequency = <300000000>; - compatible = "PowerPC,440", "ibm,ppc440"; - d-cache-line-size = <0x20>; - d-cache-size = <0x8000>; - dcr-access-method = "native"; - dcr-controller ; - device_type = "cpu"; - i-cache-line-size = <0x20>; - i-cache-size = <0x8000>; - model = "PowerPC,440"; - reg = <0>; - timebase-frequency = <300000000>; - xlnx,apu-control = <0x2000>; - xlnx,apu-udi-0 = <0x0>; - xlnx,apu-udi-1 = <0x0>; - xlnx,apu-udi-10 = <0x0>; - xlnx,apu-udi-11 = <0x0>; - xlnx,apu-udi-12 = <0x0>; - xlnx,apu-udi-13 = <0x0>; - xlnx,apu-udi-14 = <0x0>; - xlnx,apu-udi-15 = <0x0>; - xlnx,apu-udi-2 = <0x0>; - xlnx,apu-udi-3 = <0x0>; - xlnx,apu-udi-4 = <0x0>; - xlnx,apu-udi-5 = <0x0>; - xlnx,apu-udi-6 = <0x0>; - xlnx,apu-udi-7 = <0x0>; - xlnx,apu-udi-8 = <0x0>; - xlnx,apu-udi-9 = <0x0>; - xlnx,dcr-autolock-enable = <0x1>; - xlnx,dcu-rd-ld-cache-plb-prio = <0x0>; - xlnx,dcu-rd-noncache-plb-prio = <0x0>; - xlnx,dcu-rd-touch-plb-prio = <0x0>; - xlnx,dcu-rd-urgent-plb-prio = <0x0>; - xlnx,dcu-wr-flush-plb-prio = <0x0>; - xlnx,dcu-wr-store-plb-prio = <0x0>; - xlnx,dcu-wr-urgent-plb-prio = <0x0>; - xlnx,dma0-control = <0x0>; - xlnx,dma0-plb-prio = <0x0>; - xlnx,dma0-rxchannelctrl = <0x1010000>; - xlnx,dma0-rxirqtimer = <0x3ff>; - xlnx,dma0-txchannelctrl = <0x1010000>; - xlnx,dma0-txirqtimer = <0x3ff>; - xlnx,dma1-control = <0x0>; - xlnx,dma1-plb-prio = <0x0>; - xlnx,dma1-rxchannelctrl = <0x1010000>; - xlnx,dma1-rxirqtimer = <0x3ff>; - xlnx,dma1-txchannelctrl = <0x1010000>; - xlnx,dma1-txirqtimer = <0x3ff>; - xlnx,dma2-control = <0x0>; - xlnx,dma2-plb-prio = <0x0>; - 
xlnx,dma2-rxchannelctrl = <0x1010000>; - xlnx,dma2-rxirqtimer = <0x3ff>; - xlnx,dma2-txchannelctrl = <0x1010000>; - xlnx,dma2-txirqtimer = <0x3ff>; - xlnx,dma3-control = <0x0>; - xlnx,dma3-plb-prio = <0x0>; - xlnx,dma3-rxchannelctrl = <0x1010000>; - xlnx,dma3-rxirqtimer = <0x3ff>; - xlnx,dma3-txchannelctrl = <0x1010000>; - xlnx,dma3-txirqtimer = <0x3ff>; - xlnx,endian-reset = <0x0>; - xlnx,generate-plb-timespecs = <0x1>; - xlnx,icu-rd-fetch-plb-prio = <0x0>; - xlnx,icu-rd-spec-plb-prio = <0x0>; - xlnx,icu-rd-touch-plb-prio = <0x0>; - xlnx,interconnect-imask = <0xffffffff>; - xlnx,mplb-allow-lock-xfer = <0x1>; - xlnx,mplb-arb-mode = <0x0>; - xlnx,mplb-awidth = <0x20>; - xlnx,mplb-counter = <0x500>; - xlnx,mplb-dwidth = <0x80>; - xlnx,mplb-max-burst = <0x8>; - xlnx,mplb-native-dwidth = <0x80>; - xlnx,mplb-p2p = <0x0>; - xlnx,mplb-prio-dcur = <0x2>; - xlnx,mplb-prio-dcuw = <0x3>; - xlnx,mplb-prio-icu = <0x4>; - xlnx,mplb-prio-splb0 = <0x1>; - xlnx,mplb-prio-splb1 = <0x0>; - xlnx,mplb-read-pipe-enable = <0x1>; - xlnx,mplb-sync-tattribute = <0x0>; - xlnx,mplb-wdog-enable = <0x1>; - xlnx,mplb-write-pipe-enable = <0x1>; - xlnx,mplb-write-post-enable = <0x1>; - xlnx,num-dma = <0x0>; - xlnx,pir = <0xf>; - xlnx,ppc440mc-addr-base = <0x0>; - xlnx,ppc440mc-addr-high = <0x1fffffff>; - xlnx,ppc440mc-arb-mode = <0x0>; - xlnx,ppc440mc-bank-conflict-mask = <0x1800000>; - xlnx,ppc440mc-control = <0xf810008f>; - xlnx,ppc440mc-max-burst = <0x8>; - xlnx,ppc440mc-prio-dcur = <0x2>; - xlnx,ppc440mc-prio-dcuw = <0x3>; - xlnx,ppc440mc-prio-icu = <0x4>; - xlnx,ppc440mc-prio-splb0 = <0x1>; - xlnx,ppc440mc-prio-splb1 = <0x0>; - xlnx,ppc440mc-row-conflict-mask = <0x7ffe00>; - xlnx,ppcdm-asyncmode = <0x0>; - xlnx,ppcds-asyncmode = <0x0>; - xlnx,user-reset = <0x0>; - } ; - } ; - plb_v46_0: plb@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "xlnx,plb-v46-1.03.a", "simple-bus"; - ranges ; - FLASH: flash@fc000000 { - bank-width = <2>; - compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash"; - reg = < 0xfc000000 0x2000000 >; - xlnx,family = "virtex5"; - xlnx,include-datawidth-matching-0 = <0x1>; - xlnx,include-datawidth-matching-1 = <0x0>; - xlnx,include-datawidth-matching-2 = <0x0>; - xlnx,include-datawidth-matching-3 = <0x0>; - xlnx,include-negedge-ioregs = <0x0>; - xlnx,include-plb-ipif = <0x1>; - xlnx,include-wrbuf = <0x1>; - xlnx,max-mem-width = <0x10>; - xlnx,mch-native-dwidth = <0x20>; - xlnx,mch-plb-clk-period-ps = <0x2710>; - xlnx,mch-splb-awidth = <0x20>; - xlnx,mch0-accessbuf-depth = <0x10>; - xlnx,mch0-protocol = <0x0>; - xlnx,mch0-rddatabuf-depth = <0x10>; - xlnx,mch1-accessbuf-depth = <0x10>; - xlnx,mch1-protocol = <0x0>; - xlnx,mch1-rddatabuf-depth = <0x10>; - xlnx,mch2-accessbuf-depth = <0x10>; - xlnx,mch2-protocol = <0x0>; - xlnx,mch2-rddatabuf-depth = <0x10>; - xlnx,mch3-accessbuf-depth = <0x10>; - xlnx,mch3-protocol = <0x0>; - xlnx,mch3-rddatabuf-depth = <0x10>; - xlnx,mem0-width = <0x10>; - xlnx,mem1-width = <0x20>; - xlnx,mem2-width = <0x20>; - xlnx,mem3-width = <0x20>; - xlnx,num-banks-mem = <0x1>; - xlnx,num-channels = <0x2>; - xlnx,priority-mode = <0x0>; - xlnx,synch-mem-0 = <0x0>; - xlnx,synch-mem-1 = <0x0>; - xlnx,synch-mem-2 = <0x0>; - xlnx,synch-mem-3 = <0x0>; - xlnx,synch-pipedelay-0 = <0x2>; - xlnx,synch-pipedelay-1 = <0x2>; - xlnx,synch-pipedelay-2 = <0x2>; - xlnx,synch-pipedelay-3 = <0x2>; - xlnx,tavdv-ps-mem-0 = <0x1adb0>; - xlnx,tavdv-ps-mem-1 = <0x3a98>; - xlnx,tavdv-ps-mem-2 = <0x3a98>; - xlnx,tavdv-ps-mem-3 = <0x3a98>; - xlnx,tcedv-ps-mem-0 = <0x1adb0>; - 
xlnx,tcedv-ps-mem-1 = <0x3a98>; - xlnx,tcedv-ps-mem-2 = <0x3a98>; - xlnx,tcedv-ps-mem-3 = <0x3a98>; - xlnx,thzce-ps-mem-0 = <0x88b8>; - xlnx,thzce-ps-mem-1 = <0x1b58>; - xlnx,thzce-ps-mem-2 = <0x1b58>; - xlnx,thzce-ps-mem-3 = <0x1b58>; - xlnx,thzoe-ps-mem-0 = <0x1b58>; - xlnx,thzoe-ps-mem-1 = <0x1b58>; - xlnx,thzoe-ps-mem-2 = <0x1b58>; - xlnx,thzoe-ps-mem-3 = <0x1b58>; - xlnx,tlzwe-ps-mem-0 = <0x88b8>; - xlnx,tlzwe-ps-mem-1 = <0x0>; - xlnx,tlzwe-ps-mem-2 = <0x0>; - xlnx,tlzwe-ps-mem-3 = <0x0>; - xlnx,twc-ps-mem-0 = <0x1adb0>; - xlnx,twc-ps-mem-1 = <0x3a98>; - xlnx,twc-ps-mem-2 = <0x3a98>; - xlnx,twc-ps-mem-3 = <0x3a98>; - xlnx,twp-ps-mem-0 = <0x11170>; - xlnx,twp-ps-mem-1 = <0x2ee0>; - xlnx,twp-ps-mem-2 = <0x2ee0>; - xlnx,twp-ps-mem-3 = <0x2ee0>; - xlnx,xcl0-linesize = <0x4>; - xlnx,xcl0-writexfer = <0x1>; - xlnx,xcl1-linesize = <0x4>; - xlnx,xcl1-writexfer = <0x1>; - xlnx,xcl2-linesize = <0x4>; - xlnx,xcl2-writexfer = <0x1>; - xlnx,xcl3-linesize = <0x4>; - xlnx,xcl3-writexfer = <0x1>; - } ; - Hard_Ethernet_MAC: xps-ll-temac@81c00000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "xlnx,compound"; - ethernet@81c00000 { - compatible = "xlnx,xps-ll-temac-1.01.b"; - device_type = "network"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 8 2 >; - llink-connected = <&Hard_Ethernet_MAC_fifo>; - local-mac-address = [ 02 00 00 00 00 00 ]; - reg = < 0x81c00000 0x40 >; - xlnx,bus2core-clk-ratio = <0x1>; - xlnx,phy-type = <0x3>; - xlnx,phyaddr = <0x1>; - xlnx,rxcsum = <0x0>; - xlnx,rxfifo = <0x8000>; - xlnx,temac-type = <0x0>; - xlnx,txcsum = <0x0>; - xlnx,txfifo = <0x8000>; - } ; - } ; - Hard_Ethernet_MAC_fifo: xps-ll-fifo@81a00000 { - compatible = "xlnx,xps-ll-fifo-1.01.a"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 6 2 >; - reg = < 0x81a00000 0x10000 >; - xlnx,family = "virtex5"; - } ; - IIC_EEPROM: i2c@81600000 { - compatible = "xlnx,xps-iic-2.00.a"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 9 2 >; - reg = < 0x81600000 0x10000 >; - xlnx,clk-freq = <0x5f5e100>; - xlnx,family = "virtex5"; - xlnx,gpo-width = <0x1>; - xlnx,iic-freq = <0x186a0>; - xlnx,scl-inertial-delay = <0x5>; - xlnx,sda-inertial-delay = <0x5>; - xlnx,ten-bit-adr = <0x0>; - } ; - LCD_OPTIONAL: gpio@81420000 { - compatible = "xlnx,xps-gpio-1.00.a"; - reg = < 0x81420000 0x10000 >; - xlnx,all-inputs = <0x0>; - xlnx,all-inputs-2 = <0x0>; - xlnx,dout-default = <0x0>; - xlnx,dout-default-2 = <0x0>; - xlnx,family = "virtex5"; - xlnx,gpio-width = <0xb>; - xlnx,interrupt-present = <0x0>; - xlnx,is-bidir = <0x1>; - xlnx,is-bidir-2 = <0x1>; - xlnx,is-dual = <0x0>; - xlnx,tri-default = <0xffffffff>; - xlnx,tri-default-2 = <0xffffffff>; - } ; - LEDs_4Bit: gpio@81400000 { - compatible = "xlnx,xps-gpio-1.00.a"; - reg = < 0x81400000 0x10000 >; - xlnx,all-inputs = <0x0>; - xlnx,all-inputs-2 = <0x0>; - xlnx,dout-default = <0x0>; - xlnx,dout-default-2 = <0x0>; - xlnx,family = "virtex5"; - xlnx,gpio-width = <0x4>; - xlnx,interrupt-present = <0x0>; - xlnx,is-bidir = <0x1>; - xlnx,is-bidir-2 = <0x1>; - xlnx,is-dual = <0x0>; - xlnx,tri-default = <0xffffffff>; - xlnx,tri-default-2 = <0xffffffff>; - } ; - RS232_Uart_1: serial@83e00000 { - clock-frequency = <100000000>; - compatible = "xlnx,xps-uart16550-2.00.b", "ns16550"; - current-speed = <9600>; - device_type = "serial"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 11 2 >; - reg = < 0x83e00000 0x10000 >; - reg-offset = <0x1003>; - reg-shift = <2>; - xlnx,family = "virtex5"; - xlnx,has-external-rclk = <0x0>; - xlnx,has-external-xin = <0x0>; - 
xlnx,is-a-16550 = <0x1>; - } ; - SPI_EEPROM: xps-spi@feff8000 { - compatible = "xlnx,xps-spi-2.00.b"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 10 2 >; - reg = < 0xfeff8000 0x80 >; - xlnx,family = "virtex5"; - xlnx,fifo-exist = <0x1>; - xlnx,num-ss-bits = <0x1>; - xlnx,num-transfer-bits = <0x8>; - xlnx,sck-ratio = <0x80>; - } ; - SysACE_CompactFlash: sysace@83600000 { - compatible = "xlnx,xps-sysace-1.00.a"; - interrupt-parent = <&xps_intc_0>; - interrupts = < 7 2 >; - reg = < 0x83600000 0x10000 >; - xlnx,family = "virtex5"; - xlnx,mem-width = <0x10>; - } ; - plbv46_pci_0: plbv46-pci@85e00000 { - #size-cells = <2>; - #address-cells = <3>; - compatible = "xlnx,plbv46-pci-1.03.a"; - device_type = "pci"; - reg = < 0x85e00000 0x10000 >; - - /* - * The default ML510 BSB has C_IPIFBAR2PCIBAR_0 set to - * 0 which means that a read/write to the memory mapped - * i/o region (which starts at 0xa0000000) for pci - * bar 0 on the plb side translates to 0. - * It is important to set this value to 0xa0000000, so - * that inbound and outbound pci transactions work - * properly including DMA. - */ - ranges = <0x02000000 0 0xa0000000 0xa0000000 0 0x20000000 - 0x01000000 0 0x00000000 0xf0000000 0 0x00010000>; - - #interrupt-cells = <1>; - interrupt-parent = <&xps_intc_0>; - interrupt-map-mask = <0xff00 0x0 0x0 0x7>; - interrupt-map = < - /* IRQ mapping for pci slots and ALI M1533 - * periperhals. In total there are 5 interrupt - * lines connected to a xps_intc controller. - * Four of them are PCI IRQ A, B, C, D and - * which correspond to respectively xpx_intc - * 5, 4, 3 and 2. The fifth interrupt line is - * connected to the south bridge and this one - * uses irq 1 and is active high instead of - * active low. - * - * The M1533 contains various peripherals - * including AC97 audio, a modem, USB, IDE and - * some power management stuff. The modem - * isn't connected on the ML510 and the power - * management core also isn't used. - */ - - /* IDSEL 0x16 / dev=6, bus=0 / PCI slot 3 */ - 0x3000 0 0 1 &xps_intc_0 3 2 - 0x3000 0 0 2 &xps_intc_0 2 2 - 0x3000 0 0 3 &xps_intc_0 5 2 - 0x3000 0 0 4 &xps_intc_0 4 2 - - /* IDSEL 0x13 / dev=3, bus=1 / PCI slot 4 */ - /* - 0x11800 0 0 1 &xps_intc_0 5 0 2 - 0x11800 0 0 2 &xps_intc_0 4 0 2 - 0x11800 0 0 3 &xps_intc_0 3 0 2 - 0x11800 0 0 4 &xps_intc_0 2 0 2 - */ - - /* According to the datasheet + schematic - * ABCD [FPGA] of slot 5 is mapped to DABC. - * Testing showed that at least A maps to B, - * the mapping of the other pins is a guess - * and for that reason the lines have been - * commented out. 
- */ - /* IDSEL 0x15 / dev=5, bus=0 / PCI slot 5 */ - 0x2800 0 0 1 &xps_intc_0 4 2 - /* - 0x2800 0 0 2 &xps_intc_0 3 2 - 0x2800 0 0 3 &xps_intc_0 2 2 - 0x2800 0 0 4 &xps_intc_0 5 2 - */ - - /* IDSEL 0x12 / dev=2, bus=1 / PCI slot 6 */ - /* - 0x11000 0 0 1 &xps_intc_0 4 0 2 - 0x11000 0 0 2 &xps_intc_0 3 0 2 - 0x11000 0 0 3 &xps_intc_0 2 0 2 - 0x11000 0 0 4 &xps_intc_0 5 0 2 - */ - - /* IDSEL 0x11 / dev=1, bus=0 / AC97 audio */ - 0x0800 0 0 1 &i8259 7 2 - - /* IDSEL 0x1b / dev=11, bus=0 / IDE */ - 0x5800 0 0 1 &i8259 14 2 - - /* IDSEL 0x1f / dev 15, bus=0 / 2x USB 1.1 */ - 0x7800 0 0 1 &i8259 7 2 - >; - ali_m1533 { - #size-cells = <1>; - #address-cells = <2>; - i8259: interrupt-controller@20 { - reg = <1 0x20 2 - 1 0xa0 2 - 1 0x4d0 2>; - interrupt-controller; - device_type = "interrupt-controller"; - #address-cells = <0>; - #interrupt-cells = <2>; - compatible = "chrp,iic"; - - /* south bridge irq is active high */ - interrupts = <1 3>; - interrupt-parent = <&xps_intc_0>; - }; - }; - } ; - xps_bram_if_cntlr_1: xps-bram-if-cntlr@ffff0000 { - compatible = "xlnx,xps-bram-if-cntlr-1.00.a"; - reg = < 0xffff0000 0x10000 >; - xlnx,family = "virtex5"; - } ; - xps_intc_0: interrupt-controller@81800000 { - #interrupt-cells = <0x2>; - compatible = "xlnx,xps-intc-1.00.a"; - interrupt-controller ; - reg = < 0x81800000 0x10000 >; - xlnx,num-intr-inputs = <0xc>; - } ; - xps_tft_0: tft@86e00000 { - compatible = "xlnx,xps-tft-1.00.a"; - reg = < 0x86e00000 0x10000 >; - xlnx,dcr-splb-slave-if = <0x1>; - xlnx,default-tft-base-addr = <0x0>; - xlnx,family = "virtex5"; - xlnx,i2c-slave-addr = <0x76>; - xlnx,mplb-awidth = <0x20>; - xlnx,mplb-dwidth = <0x80>; - xlnx,mplb-native-dwidth = <0x40>; - xlnx,mplb-smallest-slave = <0x20>; - xlnx,tft-interface = <0x1>; - } ; - } ; -} ; diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h index e0606766480f..6455fc9a244f 100644 --- a/arch/powerpc/boot/ops.h +++ b/arch/powerpc/boot/ops.h @@ -88,7 +88,6 @@ int serial_console_init(void); int ns16550_console_init(void *devp, struct serial_console_data *scdp); int cpm_console_init(void *devp, struct serial_console_data *scdp); int mpc5200_psc_console_init(void *devp, struct serial_console_data *scdp); -int uartlite_console_init(void *devp, struct serial_console_data *scdp); int opal_console_init(void *devp, struct serial_console_data *scdp); void *simple_alloc_init(char *base, unsigned long heap_size, unsigned long granularity, unsigned long max_allocs); diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c index 9457863147f9..0bfa7e87e546 100644 --- a/arch/powerpc/boot/serial.c +++ b/arch/powerpc/boot/serial.c @@ -132,11 +132,6 @@ int serial_console_init(void) else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart")) rc = mpc5200_psc_console_init(devp, &serial_cd); #endif -#ifdef CONFIG_XILINX_VIRTEX - else if (dt_is_compatible(devp, "xlnx,opb-uartlite-1.00.b") || - dt_is_compatible(devp, "xlnx,xps-uartlite-1.00.a")) - rc = uartlite_console_init(devp, &serial_cd); -#endif #ifdef CONFIG_PPC64_BOOT_WRAPPER else if (dt_is_compatible(devp, "ibm,opal-console-raw")) rc = opal_console_init(devp, &serial_cd); diff --git a/arch/powerpc/boot/uartlite.c b/arch/powerpc/boot/uartlite.c deleted file mode 100644 index 46bed69b4169..000000000000 --- a/arch/powerpc/boot/uartlite.c +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Xilinx UARTLITE bootloader driver - * - * Copyright (C) 2007 Secret Lab Technologies Ltd. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. 
This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#include -#include -#include "types.h" -#include "string.h" -#include "stdio.h" -#include "io.h" -#include "ops.h" - -#define ULITE_RX 0x00 -#define ULITE_TX 0x04 -#define ULITE_STATUS 0x08 -#define ULITE_CONTROL 0x0c - -#define ULITE_STATUS_RXVALID 0x01 -#define ULITE_STATUS_TXFULL 0x08 - -#define ULITE_CONTROL_RST_RX 0x02 - -static void * reg_base; - -static int uartlite_open(void) -{ - /* Clear the RX FIFO */ - out_be32(reg_base + ULITE_CONTROL, ULITE_CONTROL_RST_RX); - return 0; -} - -static void uartlite_putc(unsigned char c) -{ - u32 reg = ULITE_STATUS_TXFULL; - while (reg & ULITE_STATUS_TXFULL) /* spin on TXFULL bit */ - reg = in_be32(reg_base + ULITE_STATUS); - out_be32(reg_base + ULITE_TX, c); -} - -static unsigned char uartlite_getc(void) -{ - u32 reg = 0; - while (!(reg & ULITE_STATUS_RXVALID)) /* spin waiting for RXVALID bit */ - reg = in_be32(reg_base + ULITE_STATUS); - return in_be32(reg_base + ULITE_RX); -} - -static u8 uartlite_tstc(void) -{ - u32 reg = in_be32(reg_base + ULITE_STATUS); - return reg & ULITE_STATUS_RXVALID; -} - -int uartlite_console_init(void *devp, struct serial_console_data *scdp) -{ - int n; - unsigned long reg_phys; - - n = getprop(devp, "virtual-reg", ®_base, sizeof(reg_base)); - if (n != sizeof(reg_base)) { - if (!dt_xlate_reg(devp, 0, ®_phys, NULL)) - return -1; - - reg_base = (void *)reg_phys; - } - - scdp->open = uartlite_open; - scdp->putc = uartlite_putc; - scdp->getc = uartlite_getc; - scdp->tstc = uartlite_tstc; - scdp->close = NULL; - return 0; -} diff --git a/arch/powerpc/boot/virtex.c b/arch/powerpc/boot/virtex.c deleted file mode 100644 index f731cbb4bff0..000000000000 --- a/arch/powerpc/boot/virtex.c +++ /dev/null @@ -1,97 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * The platform specific code for virtex devices since a boot loader is not - * always used. - * - * (C) Copyright 2008 Xilinx, Inc. 
- */ - -#include "ops.h" -#include "io.h" -#include "stdio.h" - -#define UART_DLL 0 /* Out: Divisor Latch Low */ -#define UART_DLM 1 /* Out: Divisor Latch High */ -#define UART_FCR 2 /* Out: FIFO Control Register */ -#define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */ -#define UART_FCR_CLEAR_XMIT 0x04 /* Clear the XMIT FIFO */ -#define UART_LCR 3 /* Out: Line Control Register */ -#define UART_MCR 4 /* Out: Modem Control Register */ -#define UART_MCR_RTS 0x02 /* RTS complement */ -#define UART_MCR_DTR 0x01 /* DTR complement */ -#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */ -#define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */ - -static int virtex_ns16550_console_init(void *devp) -{ - unsigned char *reg_base; - u32 reg_shift, reg_offset, clk, spd; - u16 divisor; - int n; - - if (dt_get_virtual_reg(devp, (void **)®_base, 1) < 1) - return -1; - - n = getprop(devp, "reg-offset", ®_offset, sizeof(reg_offset)); - if (n == sizeof(reg_offset)) - reg_base += reg_offset; - - n = getprop(devp, "reg-shift", ®_shift, sizeof(reg_shift)); - if (n != sizeof(reg_shift)) - reg_shift = 0; - - n = getprop(devp, "current-speed", (void *)&spd, sizeof(spd)); - if (n != sizeof(spd)) - spd = 9600; - - /* should there be a default clock rate?*/ - n = getprop(devp, "clock-frequency", (void *)&clk, sizeof(clk)); - if (n != sizeof(clk)) - return -1; - - divisor = clk / (16 * spd); - - /* Access baud rate */ - out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_DLAB); - - /* Baud rate based on input clock */ - out_8(reg_base + (UART_DLL << reg_shift), divisor & 0xFF); - out_8(reg_base + (UART_DLM << reg_shift), divisor >> 8); - - /* 8 data, 1 stop, no parity */ - out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_WLEN8); - - /* RTS/DTR */ - out_8(reg_base + (UART_MCR << reg_shift), UART_MCR_RTS | UART_MCR_DTR); - - /* Clear transmitter and receiver */ - out_8(reg_base + (UART_FCR << reg_shift), - UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR); - return 0; -} - -/* For virtex, the kernel may be loaded without using a bootloader and if so - some UARTs need more setup than is provided in the normal console init -*/ -int platform_specific_init(void) -{ - void *devp; - char devtype[MAX_PROP_LEN]; - char path[MAX_PATH_LEN]; - - devp = finddevice("/chosen"); - if (devp == NULL) - return -1; - - if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) { - devp = finddevice(path); - if (devp == NULL) - return -1; - - if ((getprop(devp, "device_type", devtype, sizeof(devtype)) > 0) - && !strcmp(devtype, "serial") - && (dt_is_compatible(devp, "ns16550"))) - virtex_ns16550_console_init(devp); - } - return 0; -} diff --git a/arch/powerpc/boot/virtex405-head.S b/arch/powerpc/boot/virtex405-head.S deleted file mode 100644 index 00bab7d7c48c..000000000000 --- a/arch/powerpc/boot/virtex405-head.S +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include "ppc_asm.h" - - .text - .global _zimage_start -_zimage_start: - - /* PPC errata 213: needed by Virtex-4 FX */ - mfccr0 0 - oris 0,0,0x50000000@h - mtccr0 0 - - /* - * Invalidate the data cache if the data cache is turned off. - * - The 405 core does not invalidate the data cache on power-up - * or reset but does turn off the data cache. We cannot assume - * that the cache contents are valid. - * - If the data cache is turned on this must have been done by - * a bootloader and we assume that the cache contents are - * valid. 
- */ - mfdccr r9 - cmplwi r9,0 - bne 2f - lis r9,0 - li r8,256 - mtctr r8 -1: dccci r0,r9 - addi r9,r9,0x20 - bdnz 1b -2: b _zimage_start_lib diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index d0b5f202c49c..cd58a62e810d 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -324,14 +324,6 @@ adder875-redboot) platformo="$object/fixed-head.o $object/redboot-8xx.o" binary=y ;; -simpleboot-virtex405-*) - platformo="$object/virtex405-head.o $object/simpleboot.o $object/virtex.o" - binary=y - ;; -simpleboot-virtex440-*) - platformo="$object/fixed-head.o $object/simpleboot.o $object/virtex.o" - binary=y - ;; simpleboot-*) platformo="$object/fixed-head.o $object/simpleboot.o" binary=y diff --git a/arch/powerpc/configs/40x/virtex_defconfig b/arch/powerpc/configs/40x/virtex_defconfig deleted file mode 100644 index 5e7c61d1d7d0..000000000000 --- a/arch/powerpc/configs/40x/virtex_defconfig +++ /dev/null @@ -1,75 +0,0 @@ -CONFIG_40x=y -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_WALNUT is not set -CONFIG_XILINX_VIRTEX_GENERIC_BOARD=y -CONFIG_PREEMPT=y -CONFIG_MATH_EMULATION=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" -CONFIG_PCI=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_NETFILTER=y -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_MANGLE=m -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_XILINX_SYSACE=y -CONFIG_NETDEVICES=y -# CONFIG_SERIO_SERPORT is not set -CONFIG_SERIO_XILINX_XPS_PS2=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_OF_PLATFORM=y -CONFIG_SERIAL_UARTLITE=y -CONFIG_SERIAL_UARTLITE_CONSOLE=y -CONFIG_XILINX_HWICAP=y -CONFIG_GPIOLIB=y -CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_XILINX=y -# CONFIG_HWMON is not set -CONFIG_FB=y -CONFIG_FB_XILINX=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_LOGO=y -# CONFIG_USB_SUPPORT is not set -CONFIG_EXT2_FS=y -CONFIG_AUTOFS4_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -CONFIG_CRAMFS=y -CONFIG_ROMFS_FS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_UTF8=m -CONFIG_CRC_CCITT=y -CONFIG_FONTS=y -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_KERNEL=y diff --git a/arch/powerpc/configs/44x/virtex5_defconfig b/arch/powerpc/configs/44x/virtex5_defconfig deleted file mode 100644 index 1f74079e1703..000000000000 --- a/arch/powerpc/configs/44x/virtex5_defconfig +++ /dev/null @@ -1,74 +0,0 @@ -CONFIG_44x=y -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_EBONY is not set -CONFIG_XILINX_VIRTEX440_GENERIC_BOARD=y -CONFIG_PREEMPT=y -CONFIG_MATH_EMULATION=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_NETFILTER=y -CONFIG_IP_NF_IPTABLES=m 
-CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_MANGLE=m -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_XILINX_SYSACE=y -CONFIG_NETDEVICES=y -# CONFIG_SERIO_SERPORT is not set -CONFIG_SERIO_XILINX_XPS_PS2=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_OF_PLATFORM=y -CONFIG_SERIAL_UARTLITE=y -CONFIG_SERIAL_UARTLITE_CONSOLE=y -CONFIG_XILINX_HWICAP=y -CONFIG_GPIOLIB=y -CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_XILINX=y -# CONFIG_HWMON is not set -CONFIG_FB=y -CONFIG_FB_XILINX=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_LOGO=y -# CONFIG_USB_SUPPORT is not set -CONFIG_EXT2_FS=y -CONFIG_AUTOFS4_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -CONFIG_CRAMFS=y -CONFIG_ROMFS_FS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_UTF8=m -CONFIG_CRC_CCITT=y -CONFIG_FONTS=y -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_KERNEL=y diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig index a5f683aed328..88960a72b525 100644 --- a/arch/powerpc/configs/ppc40x_defconfig +++ b/arch/powerpc/configs/ppc40x_defconfig @@ -14,7 +14,6 @@ CONFIG_EP405=y CONFIG_HOTFOOT=y CONFIG_KILAUEA=y CONFIG_MAKALU=y -CONFIG_XILINX_VIRTEX_GENERIC_BOARD=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -37,33 +36,26 @@ CONFIG_MTD_UBI=m CONFIG_MTD_UBI_GLUEBI=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_XILINX_SYSACE=m CONFIG_NETDEVICES=y CONFIG_IBM_EMAC=y # CONFIG_INPUT is not set CONFIG_SERIO=m # CONFIG_SERIO_I8042 is not set # CONFIG_SERIO_SERPORT is not set -CONFIG_SERIO_XILINX_XPS_PS2=m # CONFIG_VT is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_OF_PLATFORM=y -CONFIG_SERIAL_UARTLITE=y -CONFIG_SERIAL_UARTLITE_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_XILINX_HWICAP=m CONFIG_I2C=m CONFIG_I2C_CHARDEV=m CONFIG_I2C_GPIO=m CONFIG_I2C_IBM_IIC=m -CONFIG_GPIO_XILINX=y # CONFIG_HWMON is not set CONFIG_THERMAL=y CONFIG_FB=m -CONFIG_FB_XILINX=m CONFIG_EXT2_FS=y CONFIG_EXT4_FS=m CONFIG_VFAT_FS=m diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig index a41eedfe0a5f..8b595f67068c 100644 --- a/arch/powerpc/configs/ppc44x_defconfig +++ b/arch/powerpc/configs/ppc44x_defconfig @@ -22,7 +22,6 @@ CONFIG_GLACIER=y CONFIG_REDWOOD=y CONFIG_EIGER=y CONFIG_YOSEMITE=y -CONFIG_XILINX_VIRTEX440_GENERIC_BOARD=y CONFIG_PPC4xx_GPIO=y CONFIG_MATH_EMULATION=y CONFIG_NET=y @@ -46,7 +45,6 @@ CONFIG_MTD_UBI=m CONFIG_MTD_UBI_GLUEBI=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_XILINX_SYSACE=m CONFIG_SCSI=m CONFIG_BLK_DEV_SD=m # CONFIG_SCSI_LOWLEVEL is not set @@ -57,7 +55,6 @@ CONFIG_IBM_EMAC=y CONFIG_SERIO=m # CONFIG_SERIO_I8042 is not set # CONFIG_SERIO_SERPORT is not set -CONFIG_SERIO_XILINX_XPS_PS2=m # CONFIG_VT is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y @@ -65,18 +62,13 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_OF_PLATFORM=y -CONFIG_SERIAL_UARTLITE=y -CONFIG_SERIAL_UARTLITE_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_XILINX_HWICAP=m CONFIG_I2C=m CONFIG_I2C_CHARDEV=m CONFIG_I2C_GPIO=m CONFIG_I2C_IBM_IIC=m -CONFIG_GPIO_XILINX=y # CONFIG_HWMON is not set CONFIG_FB=m -CONFIG_FB_XILINX=m CONFIG_USB=m CONFIG_USB_EHCI_HCD=m CONFIG_USB_OHCI_HCD=m diff --git a/arch/powerpc/include/asm/xilinx_intc.h 
b/arch/powerpc/include/asm/xilinx_intc.h deleted file mode 100644 index ca9aa162fb09..000000000000 --- a/arch/powerpc/include/asm/xilinx_intc.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Xilinx intc external definitions - * - * Copyright 2007 Secret Lab Technologies Ltd. - */ -#ifndef _ASM_POWERPC_XILINX_INTC_H -#define _ASM_POWERPC_XILINX_INTC_H - -#ifdef __KERNEL__ - -extern void __init xilinx_intc_init_tree(void); -extern unsigned int xintc_get_irq(void); - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_XILINX_INTC_H */ diff --git a/arch/powerpc/include/asm/xilinx_pci.h b/arch/powerpc/include/asm/xilinx_pci.h deleted file mode 100644 index 7a8275caf6af..000000000000 --- a/arch/powerpc/include/asm/xilinx_pci.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Xilinx pci external definitions - * - * Copyright 2009 Roderick Colenbrander - * Copyright 2009 Secret Lab Technologies Ltd. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#ifndef INCLUDE_XILINX_PCI -#define INCLUDE_XILINX_PCI - -#ifdef CONFIG_XILINX_PCI -extern void __init xilinx_pci_init(void); -#else -static inline void __init xilinx_pci_init(void) { return; } -#endif - -#endif /* INCLUDE_XILINX_PCI */ diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 13eba2eb46fe..cae9764b929e 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1385,32 +1385,6 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_4xx, .platform = "ppc405", }, - { /* Xilinx Virtex-II Pro */ - .pvr_mask = 0xfffff000, - .pvr_value = 0x20010000, - .cpu_name = "Virtex-II Pro", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* Xilinx Virtex-4 FX */ - .pvr_mask = 0xfffff000, - .pvr_value = 0x20011000, - .cpu_name = "Virtex-4 FX", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, { /* 405EP */ .pvr_mask = 0xffff0000, .pvr_value = 0x51210000, @@ -1800,19 +1774,6 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_440A, .platform = "ppc440", }, - { /* 440 in Xilinx Virtex-5 FXT */ - .pvr_mask = 0xfffffff0, - .pvr_value = 0x7ff21910, - .cpu_name = "440 in Virtex-5 FXT", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_440x5, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, { /* 460EX */ .pvr_mask = 0xffff0006, .pvr_value = 0x13020002, diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index 6da813b65b42..d06ca51e8443 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -55,23 +55,6 @@ config WALNUT help This option enables support for the IBM PPC405GP evaluation board. 
-config XILINX_VIRTEX_GENERIC_BOARD - bool "Generic Xilinx Virtex board" - depends on 40x - select XILINX_VIRTEX_II_PRO - select XILINX_VIRTEX_4_FX - select XILINX_INTC - help - This option enables generic support for Xilinx Virtex based boards. - - The generic virtex board support matches any device tree which - specifies 'xilinx,virtex' in its compatible field. This includes - the Xilinx ML3xx and ML4xx reference designs using the powerpc - core. - - Most Virtex designs should use this unless it needs to do some - special configuration at board probe time. - config OBS600 bool "OpenBlockS 600" depends on 40x @@ -109,20 +92,6 @@ config 405EZ select IBM_EMAC_MAL_CLR_ICINTSTAT if IBM_EMAC select IBM_EMAC_MAL_COMMON_ERR if IBM_EMAC -config XILINX_VIRTEX - bool - select DEFAULT_UIMAGE - -config XILINX_VIRTEX_II_PRO - bool - select XILINX_VIRTEX - select IBM405_ERR77 - select IBM405_ERR51 - -config XILINX_VIRTEX_4_FX - bool - select XILINX_VIRTEX - config STB03xxx bool select IBM405_ERR77 diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile index 828d78340dd9..e9386deed505 100644 --- a/arch/powerpc/platforms/40x/Makefile +++ b/arch/powerpc/platforms/40x/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_WALNUT) += walnut.o -obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o obj-$(CONFIG_EP405) += ep405.o obj-$(CONFIG_PPC40x_SIMPLE) += ppc40x_simple.o diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c deleted file mode 100644 index e3d5e095846b..000000000000 --- a/arch/powerpc/platforms/40x/virtex.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Xilinx Virtex (IIpro & 4FX) based board support - * - * Copyright 2007 Secret Lab Technologies Ltd. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -static const struct of_device_id xilinx_of_bus_ids[] __initconst = { - { .compatible = "xlnx,plb-v46-1.00.a", }, - { .compatible = "xlnx,plb-v34-1.01.a", }, - { .compatible = "xlnx,plb-v34-1.02.a", }, - { .compatible = "xlnx,opb-v20-1.10.c", }, - { .compatible = "xlnx,dcr-v29-1.00.a", }, - { .compatible = "xlnx,compound", }, - {} -}; - -static int __init virtex_device_probe(void) -{ - of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL); - - return 0; -} -machine_device_initcall(virtex, virtex_device_probe); - -static int __init virtex_probe(void) -{ - if (!of_machine_is_compatible("xlnx,virtex")) - return 0; - - return 1; -} - -define_machine(virtex) { - .name = "Xilinx Virtex", - .probe = virtex_probe, - .setup_arch = xilinx_pci_init, - .init_IRQ = xilinx_intc_init_tree, - .get_irq = xintc_get_irq, - .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, -}; diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 25ebe634a661..39e93d23fb38 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -232,33 +232,6 @@ config ICON help This option enables support for the AMCC PPC440SPe evaluation board. 
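The generic-board Kconfig help removed in these hunks describes matching
a board purely by the device tree root's compatible string.  As a
minimal sketch of that mechanism (the function name here is invented;
of_machine_is_compatible() is the kernel API the removed virtex.c files
actually call):

	#include <linux/init.h>
	#include <linux/of.h>

	/* Machine probe: accept this platform only when the device
	 * tree root lists the expected string in 'compatible'. */
	static int __init example_board_probe(void)
	{
		return of_machine_is_compatible("xlnx,virtex");
	}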
-config XILINX_VIRTEX440_GENERIC_BOARD - bool "Generic Xilinx Virtex 5 FXT board support" - depends on 44x - select XILINX_VIRTEX_5_FXT - select XILINX_INTC - help - This option enables generic support for Xilinx Virtex based boards - that use a 440 based processor in the Virtex 5 FXT FPGA architecture. - - The generic virtex board support matches any device tree which - specifies 'xlnx,virtex440' in its compatible field. This includes - the Xilinx ML5xx reference designs using the powerpc core. - - Most Virtex 5 designs should use this unless it needs to do some - special configuration at board probe time. - -config XILINX_ML510 - bool "Xilinx ML510 extra support" - depends on XILINX_VIRTEX440_GENERIC_BOARD - select HAVE_PCI - select XILINX_PCI if PCI - select PPC_INDIRECT_PCI if PCI - select PPC_I8259 if PCI - help - This option enables extra support for features on the Xilinx ML510 - board. The ML510 has a PCI bus with ALI south bridge. - config PPC44x_SIMPLE bool "Simple PowerPC 44x board support" depends on 44x @@ -354,13 +327,3 @@ config 476FPE_ERR46 config IBM440EP_ERR42 bool -# Xilinx specific config options. -config XILINX_VIRTEX - bool - select DEFAULT_UIMAGE - -# Xilinx Virtex 5 FXT FPGA architecture, selected by a Xilinx board above -config XILINX_VIRTEX_5_FXT - bool - select XILINX_VIRTEX - diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile index 1b78c6af821a..5ba031f57652 100644 --- a/arch/powerpc/platforms/44x/Makefile +++ b/arch/powerpc/platforms/44x/Makefile @@ -7,8 +7,6 @@ obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o obj-$(CONFIG_EBONY) += ebony.o obj-$(CONFIG_SAM440EP) += sam440ep.o obj-$(CONFIG_WARP) += warp.o -obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o -obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o obj-$(CONFIG_ISS4xx) += iss4xx.o obj-$(CONFIG_CANYONLANDS)+= canyonlands.o obj-$(CONFIG_CURRITUCK) += ppc476.o diff --git a/arch/powerpc/platforms/44x/virtex.c b/arch/powerpc/platforms/44x/virtex.c deleted file mode 100644 index 3eb13ed926ee..000000000000 --- a/arch/powerpc/platforms/44x/virtex.c +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Xilinx Virtex 5FXT based board support, derived from - * the Xilinx Virtex (IIpro & 4FX) based board support - * - * Copyright 2007 Secret Lab Technologies Ltd. - * Copyright 2008 Xilinx, Inc. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "44x.h" - -static const struct of_device_id xilinx_of_bus_ids[] __initconst = { - { .compatible = "simple-bus", }, - { .compatible = "xlnx,plb-v46-1.00.a", }, - { .compatible = "xlnx,plb-v46-1.02.a", }, - { .compatible = "xlnx,plb-v34-1.01.a", }, - { .compatible = "xlnx,plb-v34-1.02.a", }, - { .compatible = "xlnx,opb-v20-1.10.c", }, - { .compatible = "xlnx,dcr-v29-1.00.a", }, - { .compatible = "xlnx,compound", }, - {} -}; - -static int __init virtex_device_probe(void) -{ - of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL); - - return 0; -} -machine_device_initcall(virtex, virtex_device_probe); - -static int __init virtex_probe(void) -{ - if (!of_machine_is_compatible("xlnx,virtex440")) - return 0; - - return 1; -} - -define_machine(virtex) { - .name = "Xilinx Virtex440", - .probe = virtex_probe, - .setup_arch = xilinx_pci_init, - .init_IRQ = xilinx_intc_init_tree, - .get_irq = xintc_get_irq, - .calibrate_decr = generic_calibrate_decr, - .restart = ppc4xx_reset_system, -}; diff --git a/arch/powerpc/platforms/44x/virtex_ml510.c b/arch/powerpc/platforms/44x/virtex_ml510.c deleted file mode 100644 index 349f218b335c..000000000000 --- a/arch/powerpc/platforms/44x/virtex_ml510.c +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include "44x.h" - -/** - * ml510_ail_quirk - */ -static void ml510_ali_quirk(struct pci_dev *dev) -{ - /* Enable the IDE controller */ - pci_write_config_byte(dev, 0x58, 0x4c); - /* Assign irq 14 to the primary ide channel */ - pci_write_config_byte(dev, 0x44, 0x0d); - /* Assign irq 15 to the secondary ide channel */ - pci_write_config_byte(dev, 0x75, 0x0f); - /* Set the ide controller in native mode */ - pci_write_config_byte(dev, 0x09, 0xff); - - /* INTB = disabled, INTA = disabled */ - pci_write_config_byte(dev, 0x48, 0x00); - /* INTD = disabled, INTC = disabled */ - pci_write_config_byte(dev, 0x4a, 0x00); - /* Audio = INT7, Modem = disabled. */ - pci_write_config_byte(dev, 0x4b, 0x60); - /* USB = INT7 */ - pci_write_config_byte(dev, 0x74, 0x06); -} -DECLARE_PCI_FIXUP_EARLY(0x10b9, 0x1533, ml510_ali_quirk); - diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 1f8025383caa..5e6479d409a0 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -317,8 +317,4 @@ config MCU_MPC8349EMITX also register MCU GPIOs with the generic GPIO API, so you'll able to use MCU pins as GPIOs. -config XILINX_PCI - bool "Xilinx PCI host bridge support" - depends on PCI && XILINX_VIRTEX - endmenu diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index cb5a5bd2cef5..026b3f01a991 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile @@ -31,8 +31,6 @@ obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o obj-$(CONFIG_PPC_I8259) += i8259.o obj-$(CONFIG_IPIC) += ipic.o -obj-$(CONFIG_XILINX_VIRTEX) += xilinx_intc.o -obj-$(CONFIG_XILINX_PCI) += xilinx_pci.o obj-$(CONFIG_OF_RTC) += of_rtc.o obj-$(CONFIG_CPM) += cpm_common.o diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c deleted file mode 100644 index 4a86dcff3fcd..000000000000 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Interrupt controller driver for Xilinx Virtex FPGAs - * - * Copyright (C) 2007 Secret Lab Technologies Ltd. 
- * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - * - */ - -/* - * This is a driver for the interrupt controller typically found in - * Xilinx Virtex FPGA designs. - * - * The interrupt sense levels are hard coded into the FPGA design with - * typically a 1:1 relationship between irq lines and devices (no shared - * irq lines). Therefore, this driver does not attempt to handle edge - * and level interrupts differently. - */ -#undef DEBUG - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if defined(CONFIG_PPC_I8259) -/* - * Support code for cascading to 8259 interrupt controllers - */ -static void xilinx_i8259_cascade(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - unsigned int cascade_irq = i8259_irq(); - - if (cascade_irq) - generic_handle_irq(cascade_irq); - - /* Let xilinx_intc end the interrupt */ - chip->irq_unmask(&desc->irq_data); -} - -static void __init xilinx_i8259_setup_cascade(void) -{ - struct device_node *cascade_node; - int cascade_irq; - - /* Initialize i8259 controller */ - cascade_node = of_find_compatible_node(NULL, NULL, "chrp,iic"); - if (!cascade_node) - return; - - cascade_irq = irq_of_parse_and_map(cascade_node, 0); - if (!cascade_irq) { - pr_err("virtex_ml510: Failed to map cascade interrupt\n"); - goto out; - } - - i8259_init(cascade_node, 0); - irq_set_chained_handler(cascade_irq, xilinx_i8259_cascade); - - /* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */ - /* This looks like a dirty hack to me --gcl */ - outb(0xc0, 0x4d0); - outb(0xc0, 0x4d1); - - out: - of_node_put(cascade_node); -} -#else -static inline void xilinx_i8259_setup_cascade(void) { return; } -#endif /* defined(CONFIG_PPC_I8259) */ - -/* - * Initialize master Xilinx interrupt controller - */ -void __init xilinx_intc_init_tree(void) -{ - irqchip_init(); - xilinx_i8259_setup_cascade(); -} diff --git a/arch/powerpc/sysdev/xilinx_pci.c b/arch/powerpc/sysdev/xilinx_pci.c deleted file mode 100644 index fea5667699ed..000000000000 --- a/arch/powerpc/sysdev/xilinx_pci.c +++ /dev/null @@ -1,132 +0,0 @@ -/* - * PCI support for Xilinx plbv46_pci soft-core which can be used on - * Xilinx Virtex ML410 / ML510 boards. - * - * Copyright 2009 Roderick Colenbrander - * Copyright 2009 Secret Lab Technologies Ltd. - * - * The pci bridge fixup code was copied from ppc4xx_pci.c and was written - * by Benjamin Herrenschmidt. - * Copyright 2007 Ben. Herrenschmidt , IBM Corp. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#include -#include -#include -#include -#include -#include - -#define XPLB_PCI_ADDR 0x10c -#define XPLB_PCI_DATA 0x110 -#define XPLB_PCI_BUS 0x114 - -#define PCI_HOST_ENABLE_CMD PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY - -static const struct of_device_id xilinx_pci_match[] = { - { .compatible = "xlnx,plbv46-pci-1.03.a", }, - {} -}; - -/** - * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration. 
- */ -static void xilinx_pci_fixup_bridge(struct pci_dev *dev) -{ - struct pci_controller *hose; - int i; - - if (dev->devfn || dev->bus->self) - return; - - hose = pci_bus_to_host(dev->bus); - if (!hose) - return; - - if (!of_match_node(xilinx_pci_match, hose->dn)) - return; - - /* Hide the PCI host BARs from the kernel as their content doesn't - * fit well in the resource management - */ - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { - dev->resource[i].start = 0; - dev->resource[i].end = 0; - dev->resource[i].flags = 0; - } - - dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n", - pci_name(dev)); -} -DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge); - -/** - * xilinx_pci_exclude_device - Don't do config access for non-root bus - * - * This is a hack. Config access to any bus other than bus 0 does not - * currently work on the ML510 so we prevent it here. - */ -static int -xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn) -{ - return (bus != 0); -} - -/** - * xilinx_pci_init - Find and register a Xilinx PCI host bridge - */ -void __init xilinx_pci_init(void) -{ - struct pci_controller *hose; - struct resource r; - void __iomem *pci_reg; - struct device_node *pci_node; - - pci_node = of_find_matching_node(NULL, xilinx_pci_match); - if(!pci_node) - return; - - if (of_address_to_resource(pci_node, 0, &r)) { - pr_err("xilinx-pci: cannot resolve base address\n"); - return; - } - - hose = pcibios_alloc_controller(pci_node); - if (!hose) { - pr_err("xilinx-pci: pcibios_alloc_controller() failed\n"); - return; - } - - /* Setup config space */ - setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR, - r.start + XPLB_PCI_DATA, - PPC_INDIRECT_TYPE_SET_CFG_TYPE); - - /* According to the xilinx plbv46_pci documentation the soft-core starts - * a self-init when the bus master enable bit is set. Without this bit - * set the pci bus can't be scanned. - */ - early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD); - - /* Set the max latency timer to 255 */ - early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff); - - /* Set the max bus number to 255 */ - pci_reg = of_iomap(pci_node, 0); - out_8(pci_reg + XPLB_PCI_BUS, 0xff); - iounmap(pci_reg); - - /* Nothing past the root bridge is working right now. By default - * exclude config access to anything except bus 0 */ - if (!ppc_md.pci_exclude_device) - ppc_md.pci_exclude_device = xilinx_pci_exclude_device; - - /* Register the host bridge with the linux kernel! */ - pci_process_bridge_OF_ranges(hose, pci_node, 1); - - pr_info("xilinx-pci: Registered PCI host bridge\n"); -} diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index d4665fe9ccd2..ac25833eb19e 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -209,7 +209,7 @@ config DTLK config XILINX_HWICAP tristate "Xilinx HWICAP Support" - depends on XILINX_VIRTEX || MICROBLAZE + depends on MICROBLAZE help This option enables support for Xilinx Internal Configuration Access Port (ICAP) driver. 
The ICAP is used on Xilinx Virtex diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 91b0a719d221..8c720df78113 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -2008,7 +2008,7 @@ config FB_PS3_DEFAULT_SIZE_M config FB_XILINX tristate "Xilinx frame buffer support" - depends on FB && (XILINX_VIRTEX || MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP) + depends on FB && (MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP) select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT -- cgit v1.2.3-59-g8ed1b From f16dca3e30c14aff545a834a7c1a1bb02b9edb48 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 30 Mar 2020 15:32:16 +0200 Subject: sound: ac97: Remove sound driver for ancient platform Xilinx PowerPC platforms are no longer supported and nobody is really testing these platforms any more, so remove the driver. If someone has an issue with this, these patches can be reverted. Signed-off-by: Michal Simek Acked-by: Takashi Iwai Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/31a3b884dde2c47a30bb2b92355978b97ea70f86.1585575111.git.michal.simek@xilinx.com --- sound/drivers/Kconfig | 12 - sound/drivers/Makefile | 2 - sound/drivers/ml403-ac97cr.c | 1298 ----------------------------------------- sound/drivers/pcm-indirect2.c | 560 ------------------ sound/drivers/pcm-indirect2.h | 127 ---- 5 files changed, 1999 deletions(-) delete mode 100644 sound/drivers/ml403-ac97cr.c delete mode 100644 sound/drivers/pcm-indirect2.c delete mode 100644 sound/drivers/pcm-indirect2.h diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig index 577c8e03ec4d..7141f73cddd3 100644 --- a/sound/drivers/Kconfig +++ b/sound/drivers/Kconfig @@ -186,18 +186,6 @@ config SND_PORTMAN2X4 To compile this driver as a module, choose M here: the module will be called snd-portman2x4. -config SND_ML403_AC97CR - tristate "Xilinx ML403 AC97 Controller Reference" - depends on XILINX_VIRTEX - select SND_AC97_CODEC - help - Say Y here to include support for the - opb_ac97_controller_ref_v1_00_a ip core found in Xilinx's ML403 - reference design. - - To compile this driver as a module, choose M here: the module - will be called snd-ml403_ac97cr. 
- config SND_AC97_POWER_SAVE bool "AC97 Power-Saving Mode" depends on SND_AC97_CODEC diff --git a/sound/drivers/Makefile b/sound/drivers/Makefile index 615558a281c8..c0fe4eccdaef 100644 --- a/sound/drivers/Makefile +++ b/sound/drivers/Makefile @@ -11,7 +11,6 @@ snd-mts64-objs := mts64.o snd-portman2x4-objs := portman2x4.o snd-serial-u16550-objs := serial-u16550.o snd-virmidi-objs := virmidi.o -snd-ml403-ac97cr-objs := ml403-ac97cr.o pcm-indirect2.o # Toplevel Module Dependency obj-$(CONFIG_SND_DUMMY) += snd-dummy.o @@ -21,6 +20,5 @@ obj-$(CONFIG_SND_SERIAL_U16550) += snd-serial-u16550.o obj-$(CONFIG_SND_MTPAV) += snd-mtpav.o obj-$(CONFIG_SND_MTS64) += snd-mts64.o obj-$(CONFIG_SND_PORTMAN2X4) += snd-portman2x4.o -obj-$(CONFIG_SND_ML403_AC97CR) += snd-ml403-ac97cr.o obj-$(CONFIG_SND) += opl3/ opl4/ mpu401/ vx/ pcsp/ diff --git a/sound/drivers/ml403-ac97cr.c b/sound/drivers/ml403-ac97cr.c deleted file mode 100644 index 0710707da8c1..000000000000 --- a/sound/drivers/ml403-ac97cr.c +++ /dev/null @@ -1,1298 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * ALSA driver for Xilinx ML403 AC97 Controller Reference - * IP: opb_ac97_controller_ref_v1_00_a (EDK 8.1i) - * IP: opb_ac97_controller_ref_v1_00_a (EDK 9.1i) - * - * Copyright (c) by 2007 Joachim Foerster - */ - -/* Some notes / status of this driver: - * - * - Don't wonder about some strange implementations of things - especially the - * (heavy) shadowing of codec registers, with which I tried to reduce read - * accesses to a minimum, because after a variable amount of accesses, the AC97 - * controller doesn't raise the register access finished bit anymore ... - * - * - Playback support seems to be pretty stable - no issues here. - * - Capture support "works" now, too. Overruns don't happen any longer so often. - * But there might still be some ... 
- */ - -#include -#include - -#include - -#include -#include -#include -#include - -/* HZ */ -#include -/* jiffies, time_*() */ -#include -/* schedule_timeout*() */ -#include -/* spin_lock*() */ -#include -/* struct mutex, mutex_init(), mutex_*lock() */ -#include - -/* snd_printk(), snd_printd() */ -#include -#include -#include -#include -#include - -#include "pcm-indirect2.h" - - -#define SND_ML403_AC97CR_DRIVER "ml403-ac97cr" - -MODULE_AUTHOR("Joachim Foerster "); -MODULE_DESCRIPTION("Xilinx ML403 AC97 Controller Reference"); -MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("{{Xilinx,ML403 AC97 Controller Reference}}"); - -static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; -static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; -static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; - -module_param_array(index, int, NULL, 0444); -MODULE_PARM_DESC(index, "Index value for ML403 AC97 Controller Reference."); -module_param_array(id, charp, NULL, 0444); -MODULE_PARM_DESC(id, "ID string for ML403 AC97 Controller Reference."); -module_param_array(enable, bool, NULL, 0444); -MODULE_PARM_DESC(enable, "Enable this ML403 AC97 Controller Reference."); - -/* Special feature options */ -/*#define CODEC_WRITE_CHECK_RAF*/ /* don't return after a write to a codec - * register, while RAF bit is not set - */ -/* Debug options for code which may be removed completely in a final version */ -#ifdef CONFIG_SND_DEBUG -/*#define CODEC_STAT*/ /* turn on some minimal "statistics" - * about codec register usage - */ -#define SND_PCM_INDIRECT2_STAT /* turn on some "statistics" about the - * process of copying bytes from the - * intermediate buffer to the hardware - * fifo and the other way round - */ -#endif - -/* Definition of a "level/facility dependent" printk(); may be removed - * completely in a final version - */ -#undef PDEBUG -#ifdef CONFIG_SND_DEBUG -/* "facilities" for PDEBUG */ -#define UNKNOWN (1<<0) -#define CODEC_SUCCESS (1<<1) -#define CODEC_FAKE (1<<2) -#define INIT_INFO (1<<3) -#define INIT_FAILURE (1<<4) -#define WORK_INFO (1<<5) -#define WORK_FAILURE (1<<6) - -#define PDEBUG_FACILITIES (UNKNOWN | INIT_FAILURE | WORK_FAILURE) - -#define PDEBUG(fac, fmt, args...) do { \ - if (fac & PDEBUG_FACILITIES) \ - snd_printd(KERN_DEBUG SND_ML403_AC97CR_DRIVER ": " \ - fmt, ##args); \ - } while (0) -#else -#define PDEBUG(fac, fmt, args...) 
/* nothing */ -#endif - - - -/* Defines for "waits"/timeouts (portions of HZ=250 on arch/ppc by default) */ -#define CODEC_TIMEOUT_ON_INIT 5 /* timeout for checking for codec - * readiness (after insmod) - */ -#ifndef CODEC_WRITE_CHECK_RAF -#define CODEC_WAIT_AFTER_WRITE 100 /* general, static wait after a write - * access to a codec register, may be - * 0 to completely remove wait - */ -#else -#define CODEC_TIMEOUT_AFTER_WRITE 5 /* timeout after a write access to a - * codec register, if RAF bit is used - */ -#endif -#define CODEC_TIMEOUT_AFTER_READ 5 /* timeout after a read access to a - * codec register (checking RAF bit) - */ - -/* Infrastructure for codec register shadowing */ -#define LM4550_REG_OK (1<<0) /* register exists */ -#define LM4550_REG_DONEREAD (1<<1) /* read register once, value should be - * the same currently in the register - */ -#define LM4550_REG_NOSAVE (1<<2) /* values written to this register will - * not be saved in the register - */ -#define LM4550_REG_NOSHADOW (1<<3) /* don't do register shadowing, use plain - * hardware access - */ -#define LM4550_REG_READONLY (1<<4) /* register is read only */ -#define LM4550_REG_FAKEPROBE (1<<5) /* fake write _and_ read actions during - * probe() correctly - */ -#define LM4550_REG_FAKEREAD (1<<6) /* fake read access, always return - * default value - */ -#define LM4550_REG_ALLFAKE (LM4550_REG_FAKEREAD | LM4550_REG_FAKEPROBE) - -struct lm4550_reg { - u16 value; - u16 flag; - u16 wmask; - u16 def; -}; - -struct lm4550_reg lm4550_regfile[64] = { - [AC97_RESET / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_NOSAVE \ - | LM4550_REG_FAKEREAD, - .def = 0x0D50}, - [AC97_MASTER / 2] = {.flag = LM4550_REG_OK - | LM4550_REG_FAKEPROBE, - .wmask = 0x9F1F, - .def = 0x8000}, - [AC97_HEADPHONE / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .wmask = 0x9F1F, - .def = 0x8000}, - [AC97_MASTER_MONO / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .wmask = 0x801F, - .def = 0x8000}, - [AC97_PC_BEEP / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .wmask = 0x801E, - .def = 0x0}, - [AC97_PHONE / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .wmask = 0x801F, - .def = 0x8008}, - [AC97_MIC / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .wmask = 0x805F, - .def = 0x8008}, - [AC97_LINE / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .wmask = 0x9F1F, - .def = 0x8808}, - [AC97_CD / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .wmask = 0x9F1F, - .def = 0x8808}, - [AC97_VIDEO / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .wmask = 0x9F1F, - .def = 0x8808}, - [AC97_AUX / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .wmask = 0x9F1F, - .def = 0x8808}, - [AC97_PCM / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .wmask = 0x9F1F, - .def = 0x8008}, - [AC97_REC_SEL / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .wmask = 0x707, - .def = 0x0}, - [AC97_REC_GAIN / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .wmask = 0x8F0F, - .def = 0x8000}, - [AC97_GENERAL_PURPOSE / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .def = 0x0, - .wmask = 0xA380}, - [AC97_3D_CONTROL / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEREAD \ - | LM4550_REG_READONLY, - .def = 0x0101}, - [AC97_POWERDOWN / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_NOSHADOW \ - | LM4550_REG_NOSAVE, - .wmask = 0xFF00}, - /* may not write ones to - * REF/ANL/DAC/ADC bits - * FIXME: Is this ok? 
- */ - [AC97_EXTENDED_ID / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEREAD \ - | LM4550_REG_READONLY, - .def = 0x0201}, /* primary codec */ - [AC97_EXTENDED_STATUS / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_NOSHADOW \ - | LM4550_REG_NOSAVE, - .wmask = 0x1}, - [AC97_PCM_FRONT_DAC_RATE / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .def = 0xBB80, - .wmask = 0xFFFF}, - [AC97_PCM_LR_ADC_RATE / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_FAKEPROBE, - .def = 0xBB80, - .wmask = 0xFFFF}, - [AC97_VENDOR_ID1 / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_READONLY \ - | LM4550_REG_FAKEREAD, - .def = 0x4E53}, - [AC97_VENDOR_ID2 / 2] = {.flag = LM4550_REG_OK \ - | LM4550_REG_READONLY \ - | LM4550_REG_FAKEREAD, - .def = 0x4350} -}; - -#define LM4550_RF_OK(reg) (lm4550_regfile[reg / 2].flag & LM4550_REG_OK) - -static void lm4550_regfile_init(void) -{ - int i; - for (i = 0; i < 64; i++) - if (lm4550_regfile[i].flag & LM4550_REG_FAKEPROBE) - lm4550_regfile[i].value = lm4550_regfile[i].def; -} - -static void lm4550_regfile_write_values_after_init(struct snd_ac97 *ac97) -{ - int i; - for (i = 0; i < 64; i++) - if ((lm4550_regfile[i].flag & LM4550_REG_FAKEPROBE) && - (lm4550_regfile[i].value != lm4550_regfile[i].def)) { - PDEBUG(CODEC_FAKE, "lm4550_regfile_write_values_after_" - "init(): reg=0x%x value=0x%x / %d is different " - "from def=0x%x / %d\n", - i, lm4550_regfile[i].value, - lm4550_regfile[i].value, lm4550_regfile[i].def, - lm4550_regfile[i].def); - snd_ac97_write(ac97, i * 2, lm4550_regfile[i].value); - lm4550_regfile[i].flag |= LM4550_REG_DONEREAD; - } -} - - -/* direct registers */ -#define CR_REG(ml403_ac97cr, x) ((ml403_ac97cr)->port + CR_REG_##x) - -#define CR_REG_PLAYFIFO 0x00 -#define CR_PLAYDATA(a) ((a) & 0xFFFF) - -#define CR_REG_RECFIFO 0x04 -#define CR_RECDATA(a) ((a) & 0xFFFF) - -#define CR_REG_STATUS 0x08 -#define CR_RECOVER (1<<7) -#define CR_PLAYUNDER (1<<6) -#define CR_CODECREADY (1<<5) -#define CR_RAF (1<<4) -#define CR_RECEMPTY (1<<3) -#define CR_RECFULL (1<<2) -#define CR_PLAYHALF (1<<1) -#define CR_PLAYFULL (1<<0) - -#define CR_REG_RESETFIFO 0x0C -#define CR_RECRESET (1<<1) -#define CR_PLAYRESET (1<<0) - -#define CR_REG_CODEC_ADDR 0x10 -/* UG082 says: - * #define CR_CODEC_ADDR(a) ((a) << 1) - * #define CR_CODEC_READ (1<<0) - * #define CR_CODEC_WRITE (0<<0) - */ -/* RefDesign example says: */ -#define CR_CODEC_ADDR(a) ((a) << 0) -#define CR_CODEC_READ (1<<7) -#define CR_CODEC_WRITE (0<<7) - -#define CR_REG_CODEC_DATAREAD 0x14 -#define CR_CODEC_DATAREAD(v) ((v) & 0xFFFF) - -#define CR_REG_CODEC_DATAWRITE 0x18 -#define CR_CODEC_DATAWRITE(v) ((v) & 0xFFFF) - -#define CR_FIFO_SIZE 32 - -struct snd_ml403_ac97cr { - /* lock for access to (controller) registers */ - spinlock_t reg_lock; - /* mutex for the whole sequence of accesses to (controller) registers - * which affect codec registers - */ - struct mutex cdc_mutex; - - int irq; /* for playback */ - int enable_irq; /* for playback */ - - int capture_irq; - int enable_capture_irq; - - struct resource *res_port; - void *port; - - struct snd_ac97 *ac97; - int ac97_fake; -#ifdef CODEC_STAT - int ac97_read; - int ac97_write; -#endif - - struct platform_device *pfdev; - struct snd_card *card; - struct snd_pcm *pcm; - struct snd_pcm_substream *playback_substream; - struct snd_pcm_substream *capture_substream; - - struct snd_pcm_indirect2 ind_rec; /* for playback */ - struct snd_pcm_indirect2 capture_ind2_rec; -}; - -static const struct snd_pcm_hardware snd_ml403_ac97cr_playback = { - .info = (SNDRV_PCM_INFO_MMAP | - 
SNDRV_PCM_INFO_INTERLEAVED | - SNDRV_PCM_INFO_MMAP_VALID), - .formats = SNDRV_PCM_FMTBIT_S16_BE, - .rates = (SNDRV_PCM_RATE_CONTINUOUS | - SNDRV_PCM_RATE_8000_48000), - .rate_min = 4000, - .rate_max = 48000, - .channels_min = 2, - .channels_max = 2, - .buffer_bytes_max = (128*1024), - .period_bytes_min = CR_FIFO_SIZE/2, - .period_bytes_max = (64*1024), - .periods_min = 2, - .periods_max = (128*1024)/(CR_FIFO_SIZE/2), - .fifo_size = 0, -}; - -static const struct snd_pcm_hardware snd_ml403_ac97cr_capture = { - .info = (SNDRV_PCM_INFO_MMAP | - SNDRV_PCM_INFO_INTERLEAVED | - SNDRV_PCM_INFO_MMAP_VALID), - .formats = SNDRV_PCM_FMTBIT_S16_BE, - .rates = (SNDRV_PCM_RATE_CONTINUOUS | - SNDRV_PCM_RATE_8000_48000), - .rate_min = 4000, - .rate_max = 48000, - .channels_min = 2, - .channels_max = 2, - .buffer_bytes_max = (128*1024), - .period_bytes_min = CR_FIFO_SIZE/2, - .period_bytes_max = (64*1024), - .periods_min = 2, - .periods_max = (128*1024)/(CR_FIFO_SIZE/2), - .fifo_size = 0, -}; - -static size_t -snd_ml403_ac97cr_playback_ind2_zero(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - int copied_words = 0; - u32 full = 0; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - - spin_lock(&ml403_ac97cr->reg_lock); - while ((full = (in_be32(CR_REG(ml403_ac97cr, STATUS)) & - CR_PLAYFULL)) != CR_PLAYFULL) { - out_be32(CR_REG(ml403_ac97cr, PLAYFIFO), 0); - copied_words++; - } - rec->hw_ready = 0; - spin_unlock(&ml403_ac97cr->reg_lock); - - return (size_t) (copied_words * 2); -} - -static size_t -snd_ml403_ac97cr_playback_ind2_copy(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec, - size_t bytes) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - u16 *src; - int copied_words = 0; - u32 full = 0; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - src = (u16 *)(substream->runtime->dma_area + rec->sw_data); - - spin_lock(&ml403_ac97cr->reg_lock); - while (((full = (in_be32(CR_REG(ml403_ac97cr, STATUS)) & - CR_PLAYFULL)) != CR_PLAYFULL) && (bytes > 1)) { - out_be32(CR_REG(ml403_ac97cr, PLAYFIFO), - CR_PLAYDATA(src[copied_words])); - copied_words++; - bytes = bytes - 2; - } - if (full != CR_PLAYFULL) - rec->hw_ready = 1; - else - rec->hw_ready = 0; - spin_unlock(&ml403_ac97cr->reg_lock); - - return (size_t) (copied_words * 2); -} - -static size_t -snd_ml403_ac97cr_capture_ind2_null(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - int copied_words = 0; - u32 empty = 0; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - - spin_lock(&ml403_ac97cr->reg_lock); - while ((empty = (in_be32(CR_REG(ml403_ac97cr, STATUS)) & - CR_RECEMPTY)) != CR_RECEMPTY) { - volatile u32 trash; - - trash = CR_RECDATA(in_be32(CR_REG(ml403_ac97cr, RECFIFO))); - /* Hmmmm, really necessary? Don't want call to in_be32() - * to be optimised away! 
- */ - trash++; - copied_words++; - } - rec->hw_ready = 0; - spin_unlock(&ml403_ac97cr->reg_lock); - - return (size_t) (copied_words * 2); -} - -static size_t -snd_ml403_ac97cr_capture_ind2_copy(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec, size_t bytes) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - u16 *dst; - int copied_words = 0; - u32 empty = 0; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - dst = (u16 *)(substream->runtime->dma_area + rec->sw_data); - - spin_lock(&ml403_ac97cr->reg_lock); - while (((empty = (in_be32(CR_REG(ml403_ac97cr, STATUS)) & - CR_RECEMPTY)) != CR_RECEMPTY) && (bytes > 1)) { - dst[copied_words] = CR_RECDATA(in_be32(CR_REG(ml403_ac97cr, - RECFIFO))); - copied_words++; - bytes = bytes - 2; - } - if (empty != CR_RECEMPTY) - rec->hw_ready = 1; - else - rec->hw_ready = 0; - spin_unlock(&ml403_ac97cr->reg_lock); - - return (size_t) (copied_words * 2); -} - -static snd_pcm_uframes_t -snd_ml403_ac97cr_pcm_pointer(struct snd_pcm_substream *substream) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - struct snd_pcm_indirect2 *ind2_rec = NULL; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - - if (substream == ml403_ac97cr->playback_substream) - ind2_rec = &ml403_ac97cr->ind_rec; - if (substream == ml403_ac97cr->capture_substream) - ind2_rec = &ml403_ac97cr->capture_ind2_rec; - - if (ind2_rec != NULL) - return snd_pcm_indirect2_pointer(substream, ind2_rec); - return (snd_pcm_uframes_t) 0; -} - -static int -snd_ml403_ac97cr_pcm_playback_trigger(struct snd_pcm_substream *substream, - int cmd) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - int err = 0; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - - switch (cmd) { - case SNDRV_PCM_TRIGGER_START: - PDEBUG(WORK_INFO, "trigger(playback): START\n"); - ml403_ac97cr->ind_rec.hw_ready = 1; - - /* clear play FIFO */ - out_be32(CR_REG(ml403_ac97cr, RESETFIFO), CR_PLAYRESET); - - /* enable play irq */ - ml403_ac97cr->enable_irq = 1; - enable_irq(ml403_ac97cr->irq); - break; - case SNDRV_PCM_TRIGGER_STOP: - PDEBUG(WORK_INFO, "trigger(playback): STOP\n"); - ml403_ac97cr->ind_rec.hw_ready = 0; -#ifdef SND_PCM_INDIRECT2_STAT - snd_pcm_indirect2_stat(substream, &ml403_ac97cr->ind_rec); -#endif - /* disable play irq */ - disable_irq_nosync(ml403_ac97cr->irq); - ml403_ac97cr->enable_irq = 0; - break; - default: - err = -EINVAL; - break; - } - PDEBUG(WORK_INFO, "trigger(playback): (done)\n"); - return err; -} - -static int -snd_ml403_ac97cr_pcm_capture_trigger(struct snd_pcm_substream *substream, - int cmd) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - int err = 0; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - - switch (cmd) { - case SNDRV_PCM_TRIGGER_START: - PDEBUG(WORK_INFO, "trigger(capture): START\n"); - ml403_ac97cr->capture_ind2_rec.hw_ready = 0; - - /* clear record FIFO */ - out_be32(CR_REG(ml403_ac97cr, RESETFIFO), CR_RECRESET); - - /* enable record irq */ - ml403_ac97cr->enable_capture_irq = 1; - enable_irq(ml403_ac97cr->capture_irq); - break; - case SNDRV_PCM_TRIGGER_STOP: - PDEBUG(WORK_INFO, "trigger(capture): STOP\n"); - ml403_ac97cr->capture_ind2_rec.hw_ready = 0; -#ifdef SND_PCM_INDIRECT2_STAT - snd_pcm_indirect2_stat(substream, - &ml403_ac97cr->capture_ind2_rec); -#endif - /* disable capture irq */ - disable_irq_nosync(ml403_ac97cr->capture_irq); - ml403_ac97cr->enable_capture_irq = 0; - break; - default: - err = -EINVAL; - break; - } - PDEBUG(WORK_INFO, "trigger(capture): (done)\n"); - return err; -} - -static int -snd_ml403_ac97cr_pcm_playback_prepare(struct 
snd_pcm_substream *substream) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - struct snd_pcm_runtime *runtime; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - runtime = substream->runtime; - - PDEBUG(WORK_INFO, - "prepare(): period_bytes=%d, minperiod_bytes=%d\n", - snd_pcm_lib_period_bytes(substream), CR_FIFO_SIZE / 2); - - /* set sampling rate */ - snd_ac97_set_rate(ml403_ac97cr->ac97, AC97_PCM_FRONT_DAC_RATE, - runtime->rate); - PDEBUG(WORK_INFO, "prepare(): rate=%d\n", runtime->rate); - - /* init struct for intermediate buffer */ - memset(&ml403_ac97cr->ind_rec, 0, - sizeof(struct snd_pcm_indirect2)); - ml403_ac97cr->ind_rec.hw_buffer_size = CR_FIFO_SIZE; - ml403_ac97cr->ind_rec.sw_buffer_size = - snd_pcm_lib_buffer_bytes(substream); - ml403_ac97cr->ind_rec.min_periods = -1; - ml403_ac97cr->ind_rec.min_multiple = - snd_pcm_lib_period_bytes(substream) / (CR_FIFO_SIZE / 2); - PDEBUG(WORK_INFO, "prepare(): hw_buffer_size=%d, " - "sw_buffer_size=%d, min_multiple=%d\n", - CR_FIFO_SIZE, ml403_ac97cr->ind_rec.sw_buffer_size, - ml403_ac97cr->ind_rec.min_multiple); - return 0; -} - -static int -snd_ml403_ac97cr_pcm_capture_prepare(struct snd_pcm_substream *substream) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - struct snd_pcm_runtime *runtime; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - runtime = substream->runtime; - - PDEBUG(WORK_INFO, - "prepare(capture): period_bytes=%d, minperiod_bytes=%d\n", - snd_pcm_lib_period_bytes(substream), CR_FIFO_SIZE / 2); - - /* set sampling rate */ - snd_ac97_set_rate(ml403_ac97cr->ac97, AC97_PCM_LR_ADC_RATE, - runtime->rate); - PDEBUG(WORK_INFO, "prepare(capture): rate=%d\n", runtime->rate); - - /* init struct for intermediate buffer */ - memset(&ml403_ac97cr->capture_ind2_rec, 0, - sizeof(struct snd_pcm_indirect2)); - ml403_ac97cr->capture_ind2_rec.hw_buffer_size = CR_FIFO_SIZE; - ml403_ac97cr->capture_ind2_rec.sw_buffer_size = - snd_pcm_lib_buffer_bytes(substream); - ml403_ac97cr->capture_ind2_rec.min_multiple = - snd_pcm_lib_period_bytes(substream) / (CR_FIFO_SIZE / 2); - PDEBUG(WORK_INFO, "prepare(capture): hw_buffer_size=%d, " - "sw_buffer_size=%d, min_multiple=%d\n", CR_FIFO_SIZE, - ml403_ac97cr->capture_ind2_rec.sw_buffer_size, - ml403_ac97cr->capture_ind2_rec.min_multiple); - return 0; -} - -static int snd_ml403_ac97cr_playback_open(struct snd_pcm_substream *substream) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - struct snd_pcm_runtime *runtime; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - runtime = substream->runtime; - - PDEBUG(WORK_INFO, "open(playback)\n"); - ml403_ac97cr->playback_substream = substream; - runtime->hw = snd_ml403_ac97cr_playback; - - snd_pcm_hw_constraint_step(runtime, 0, - SNDRV_PCM_HW_PARAM_PERIOD_BYTES, - CR_FIFO_SIZE / 2); - return 0; -} - -static int snd_ml403_ac97cr_capture_open(struct snd_pcm_substream *substream) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - struct snd_pcm_runtime *runtime; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - runtime = substream->runtime; - - PDEBUG(WORK_INFO, "open(capture)\n"); - ml403_ac97cr->capture_substream = substream; - runtime->hw = snd_ml403_ac97cr_capture; - - snd_pcm_hw_constraint_step(runtime, 0, - SNDRV_PCM_HW_PARAM_PERIOD_BYTES, - CR_FIFO_SIZE / 2); - return 0; -} - -static int snd_ml403_ac97cr_playback_close(struct snd_pcm_substream *substream) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - - PDEBUG(WORK_INFO, "close(playback)\n"); - ml403_ac97cr->playback_substream = NULL; - return 0; -} 
- -static int snd_ml403_ac97cr_capture_close(struct snd_pcm_substream *substream) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - - ml403_ac97cr = snd_pcm_substream_chip(substream); - - PDEBUG(WORK_INFO, "close(capture)\n"); - ml403_ac97cr->capture_substream = NULL; - return 0; -} - -static const struct snd_pcm_ops snd_ml403_ac97cr_playback_ops = { - .open = snd_ml403_ac97cr_playback_open, - .close = snd_ml403_ac97cr_playback_close, - .prepare = snd_ml403_ac97cr_pcm_playback_prepare, - .trigger = snd_ml403_ac97cr_pcm_playback_trigger, - .pointer = snd_ml403_ac97cr_pcm_pointer, -}; - -static const struct snd_pcm_ops snd_ml403_ac97cr_capture_ops = { - .open = snd_ml403_ac97cr_capture_open, - .close = snd_ml403_ac97cr_capture_close, - .prepare = snd_ml403_ac97cr_pcm_capture_prepare, - .trigger = snd_ml403_ac97cr_pcm_capture_trigger, - .pointer = snd_ml403_ac97cr_pcm_pointer, -}; - -static irqreturn_t snd_ml403_ac97cr_irq(int irq, void *dev_id) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - struct platform_device *pfdev; - int cmp_irq; - - ml403_ac97cr = (struct snd_ml403_ac97cr *)dev_id; - if (ml403_ac97cr == NULL) - return IRQ_NONE; - - pfdev = ml403_ac97cr->pfdev; - - /* playback interrupt */ - cmp_irq = platform_get_irq(pfdev, 0); - if (irq == cmp_irq) { - if (ml403_ac97cr->enable_irq) - snd_pcm_indirect2_playback_interrupt( - ml403_ac97cr->playback_substream, - &ml403_ac97cr->ind_rec, - snd_ml403_ac97cr_playback_ind2_copy, - snd_ml403_ac97cr_playback_ind2_zero); - else - goto __disable_irq; - } else { - /* record interrupt */ - cmp_irq = platform_get_irq(pfdev, 1); - if (irq == cmp_irq) { - if (ml403_ac97cr->enable_capture_irq) - snd_pcm_indirect2_capture_interrupt( - ml403_ac97cr->capture_substream, - &ml403_ac97cr->capture_ind2_rec, - snd_ml403_ac97cr_capture_ind2_copy, - snd_ml403_ac97cr_capture_ind2_null); - else - goto __disable_irq; - } else - return IRQ_NONE; - } - return IRQ_HANDLED; - -__disable_irq: - PDEBUG(INIT_INFO, "irq(): irq %d is meant to be disabled! 
So, now try " - "to disable it _really_!\n", irq); - disable_irq_nosync(irq); - return IRQ_HANDLED; -} - -static unsigned short -snd_ml403_ac97cr_codec_read(struct snd_ac97 *ac97, unsigned short reg) -{ - struct snd_ml403_ac97cr *ml403_ac97cr = ac97->private_data; -#ifdef CODEC_STAT - u32 stat; - u32 rafaccess = 0; -#endif - unsigned long end_time; - u16 value = 0; - - if (!LM4550_RF_OK(reg)) { - snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": " - "access to unknown/unused codec register 0x%x " - "ignored!\n", reg); - return 0; - } - /* check if we can fake/answer this access from our shadow register */ - if ((lm4550_regfile[reg / 2].flag & - (LM4550_REG_DONEREAD | LM4550_REG_ALLFAKE)) && - !(lm4550_regfile[reg / 2].flag & LM4550_REG_NOSHADOW)) { - if (lm4550_regfile[reg / 2].flag & LM4550_REG_FAKEREAD) { - PDEBUG(CODEC_FAKE, "codec_read(): faking read from " - "reg=0x%x, val=0x%x / %d\n", - reg, lm4550_regfile[reg / 2].def, - lm4550_regfile[reg / 2].def); - return lm4550_regfile[reg / 2].def; - } else if ((lm4550_regfile[reg / 2].flag & - LM4550_REG_FAKEPROBE) && - ml403_ac97cr->ac97_fake) { - PDEBUG(CODEC_FAKE, "codec_read(): faking read from " - "reg=0x%x, val=0x%x / %d (probe)\n", - reg, lm4550_regfile[reg / 2].value, - lm4550_regfile[reg / 2].value); - return lm4550_regfile[reg / 2].value; - } else { -#ifdef CODEC_STAT - PDEBUG(CODEC_FAKE, "codec_read(): read access " - "answered by shadow register 0x%x (value=0x%x " - "/ %d) (cw=%d cr=%d)\n", - reg, lm4550_regfile[reg / 2].value, - lm4550_regfile[reg / 2].value, - ml403_ac97cr->ac97_write, - ml403_ac97cr->ac97_read); -#else - PDEBUG(CODEC_FAKE, "codec_read(): read access " - "answered by shadow register 0x%x (value=0x%x " - "/ %d)\n", - reg, lm4550_regfile[reg / 2].value, - lm4550_regfile[reg / 2].value); -#endif - return lm4550_regfile[reg / 2].value; - } - } - /* if we are here, we _have_ to access the codec really, no faking */ - if (mutex_lock_interruptible(&ml403_ac97cr->cdc_mutex) != 0) - return 0; -#ifdef CODEC_STAT - ml403_ac97cr->ac97_read++; -#endif - spin_lock(&ml403_ac97cr->reg_lock); - out_be32(CR_REG(ml403_ac97cr, CODEC_ADDR), - CR_CODEC_ADDR(reg) | CR_CODEC_READ); - spin_unlock(&ml403_ac97cr->reg_lock); - end_time = jiffies + (HZ / CODEC_TIMEOUT_AFTER_READ); - do { - spin_lock(&ml403_ac97cr->reg_lock); -#ifdef CODEC_STAT - rafaccess++; - stat = in_be32(CR_REG(ml403_ac97cr, STATUS)); - if ((stat & CR_RAF) == CR_RAF) { - value = CR_CODEC_DATAREAD( - in_be32(CR_REG(ml403_ac97cr, CODEC_DATAREAD))); - PDEBUG(CODEC_SUCCESS, "codec_read(): (done) reg=0x%x, " - "value=0x%x / %d (STATUS=0x%x)\n", - reg, value, value, stat); -#else - if ((in_be32(CR_REG(ml403_ac97cr, STATUS)) & - CR_RAF) == CR_RAF) { - value = CR_CODEC_DATAREAD( - in_be32(CR_REG(ml403_ac97cr, CODEC_DATAREAD))); - PDEBUG(CODEC_SUCCESS, "codec_read(): (done) " - "reg=0x%x, value=0x%x / %d\n", - reg, value, value); -#endif - lm4550_regfile[reg / 2].value = value; - lm4550_regfile[reg / 2].flag |= LM4550_REG_DONEREAD; - spin_unlock(&ml403_ac97cr->reg_lock); - mutex_unlock(&ml403_ac97cr->cdc_mutex); - return value; - } - spin_unlock(&ml403_ac97cr->reg_lock); - schedule_timeout_uninterruptible(1); - } while (time_after(end_time, jiffies)); - /* read the DATAREAD register anyway, see comment below */ - spin_lock(&ml403_ac97cr->reg_lock); - value = - CR_CODEC_DATAREAD(in_be32(CR_REG(ml403_ac97cr, CODEC_DATAREAD))); - spin_unlock(&ml403_ac97cr->reg_lock); -#ifdef CODEC_STAT - snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": " - "timeout while codec read! 
" - "(reg=0x%x, last STATUS=0x%x, DATAREAD=0x%x / %d, %d) " - "(cw=%d, cr=%d)\n", - reg, stat, value, value, rafaccess, - ml403_ac97cr->ac97_write, ml403_ac97cr->ac97_read); -#else - snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": " - "timeout while codec read! " - "(reg=0x%x, DATAREAD=0x%x / %d)\n", - reg, value, value); -#endif - /* BUG: This is PURE speculation! But after _most_ read timeouts the - * value in the register is ok! - */ - lm4550_regfile[reg / 2].value = value; - lm4550_regfile[reg / 2].flag |= LM4550_REG_DONEREAD; - mutex_unlock(&ml403_ac97cr->cdc_mutex); - return value; -} - -static void -snd_ml403_ac97cr_codec_write(struct snd_ac97 *ac97, unsigned short reg, - unsigned short val) -{ - struct snd_ml403_ac97cr *ml403_ac97cr = ac97->private_data; - -#ifdef CODEC_STAT - u32 stat; - u32 rafaccess = 0; -#endif -#ifdef CODEC_WRITE_CHECK_RAF - unsigned long end_time; -#endif - - if (!LM4550_RF_OK(reg)) { - snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": " - "access to unknown/unused codec register 0x%x " - "ignored!\n", reg); - return; - } - if (lm4550_regfile[reg / 2].flag & LM4550_REG_READONLY) { - snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": " - "write access to read only codec register 0x%x " - "ignored!\n", reg); - return; - } - if ((val & lm4550_regfile[reg / 2].wmask) != val) { - snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": " - "write access to codec register 0x%x " - "with bad value 0x%x / %d!\n", - reg, val, val); - val = val & lm4550_regfile[reg / 2].wmask; - } - if (((lm4550_regfile[reg / 2].flag & LM4550_REG_FAKEPROBE) && - ml403_ac97cr->ac97_fake) && - !(lm4550_regfile[reg / 2].flag & LM4550_REG_NOSHADOW)) { - PDEBUG(CODEC_FAKE, "codec_write(): faking write to reg=0x%x, " - "val=0x%x / %d\n", reg, val, val); - lm4550_regfile[reg / 2].value = (val & - lm4550_regfile[reg / 2].wmask); - return; - } - if (mutex_lock_interruptible(&ml403_ac97cr->cdc_mutex) != 0) - return; -#ifdef CODEC_STAT - ml403_ac97cr->ac97_write++; -#endif - spin_lock(&ml403_ac97cr->reg_lock); - out_be32(CR_REG(ml403_ac97cr, CODEC_DATAWRITE), - CR_CODEC_DATAWRITE(val)); - out_be32(CR_REG(ml403_ac97cr, CODEC_ADDR), - CR_CODEC_ADDR(reg) | CR_CODEC_WRITE); - spin_unlock(&ml403_ac97cr->reg_lock); -#ifdef CODEC_WRITE_CHECK_RAF - /* check CR_CODEC_RAF bit to see if write access to register is done; - * loop until bit is set or timeout happens - */ - end_time = jiffies + HZ / CODEC_TIMEOUT_AFTER_WRITE; - do { - spin_lock(&ml403_ac97cr->reg_lock); -#ifdef CODEC_STAT - rafaccess++; - stat = in_be32(CR_REG(ml403_ac97cr, STATUS)) - if ((stat & CR_RAF) == CR_RAF) { -#else - if ((in_be32(CR_REG(ml403_ac97cr, STATUS)) & - CR_RAF) == CR_RAF) { -#endif - PDEBUG(CODEC_SUCCESS, "codec_write(): (done) " - "reg=0x%x, value=%d / 0x%x\n", - reg, val, val); - if (!(lm4550_regfile[reg / 2].flag & - LM4550_REG_NOSHADOW) && - !(lm4550_regfile[reg / 2].flag & - LM4550_REG_NOSAVE)) - lm4550_regfile[reg / 2].value = val; - lm4550_regfile[reg / 2].flag |= LM4550_REG_DONEREAD; - spin_unlock(&ml403_ac97cr->reg_lock); - mutex_unlock(&ml403_ac97cr->cdc_mutex); - return; - } - spin_unlock(&ml403_ac97cr->reg_lock); - schedule_timeout_uninterruptible(1); - } while (time_after(end_time, jiffies)); -#ifdef CODEC_STAT - snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": " - "timeout while codec write " - "(reg=0x%x, val=0x%x / %d, last STATUS=0x%x, %d) " - "(cw=%d, cr=%d)\n", - reg, val, val, stat, rafaccess, ml403_ac97cr->ac97_write, - ml403_ac97cr->ac97_read); -#else - snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": 
" - "timeout while codec write (reg=0x%x, val=0x%x / %d)\n", - reg, val, val); -#endif -#else /* CODEC_WRITE_CHECK_RAF */ -#if CODEC_WAIT_AFTER_WRITE > 0 - /* officially, in AC97 spec there is no possibility for a AC97 - * controller to determine, if write access is done or not - so: How - * is Xilinx able to provide a RAF bit for write access? - * => very strange, thus just don't check RAF bit (compare with - * Xilinx's example app in EDK 8.1i) and wait - */ - schedule_timeout_uninterruptible(HZ / CODEC_WAIT_AFTER_WRITE); -#endif - PDEBUG(CODEC_SUCCESS, "codec_write(): (done) " - "reg=0x%x, value=%d / 0x%x (no RAF check)\n", - reg, val, val); -#endif - mutex_unlock(&ml403_ac97cr->cdc_mutex); - return; -} - -static int -snd_ml403_ac97cr_chip_init(struct snd_ml403_ac97cr *ml403_ac97cr) -{ - unsigned long end_time; - PDEBUG(INIT_INFO, "chip_init():\n"); - end_time = jiffies + HZ / CODEC_TIMEOUT_ON_INIT; - do { - if (in_be32(CR_REG(ml403_ac97cr, STATUS)) & CR_CODECREADY) { - /* clear both hardware FIFOs */ - out_be32(CR_REG(ml403_ac97cr, RESETFIFO), - CR_RECRESET | CR_PLAYRESET); - PDEBUG(INIT_INFO, "chip_init(): (done)\n"); - return 0; - } - schedule_timeout_uninterruptible(1); - } while (time_after(end_time, jiffies)); - snd_printk(KERN_ERR SND_ML403_AC97CR_DRIVER ": " - "timeout while waiting for codec, " - "not ready!\n"); - return -EBUSY; -} - -static int snd_ml403_ac97cr_free(struct snd_ml403_ac97cr *ml403_ac97cr) -{ - PDEBUG(INIT_INFO, "free():\n"); - /* irq release */ - if (ml403_ac97cr->irq >= 0) - free_irq(ml403_ac97cr->irq, ml403_ac97cr); - if (ml403_ac97cr->capture_irq >= 0) - free_irq(ml403_ac97cr->capture_irq, ml403_ac97cr); - /* give back "port" */ - iounmap(ml403_ac97cr->port); - kfree(ml403_ac97cr); - PDEBUG(INIT_INFO, "free(): (done)\n"); - return 0; -} - -static int snd_ml403_ac97cr_dev_free(struct snd_device *snddev) -{ - struct snd_ml403_ac97cr *ml403_ac97cr = snddev->device_data; - PDEBUG(INIT_INFO, "dev_free():\n"); - return snd_ml403_ac97cr_free(ml403_ac97cr); -} - -static int -snd_ml403_ac97cr_create(struct snd_card *card, struct platform_device *pfdev, - struct snd_ml403_ac97cr **rml403_ac97cr) -{ - struct snd_ml403_ac97cr *ml403_ac97cr; - int err; - static const struct snd_device_ops ops = { - .dev_free = snd_ml403_ac97cr_dev_free, - }; - struct resource *resource; - int irq; - - *rml403_ac97cr = NULL; - ml403_ac97cr = kzalloc(sizeof(*ml403_ac97cr), GFP_KERNEL); - if (ml403_ac97cr == NULL) - return -ENOMEM; - spin_lock_init(&ml403_ac97cr->reg_lock); - mutex_init(&ml403_ac97cr->cdc_mutex); - ml403_ac97cr->card = card; - ml403_ac97cr->pfdev = pfdev; - ml403_ac97cr->irq = -1; - ml403_ac97cr->enable_irq = 0; - ml403_ac97cr->capture_irq = -1; - ml403_ac97cr->enable_capture_irq = 0; - ml403_ac97cr->port = NULL; - ml403_ac97cr->res_port = NULL; - - PDEBUG(INIT_INFO, "Trying to reserve resources now ...\n"); - resource = platform_get_resource(pfdev, IORESOURCE_MEM, 0); - /* get "port" */ - ml403_ac97cr->port = ioremap(resource->start, - (resource->end) - - (resource->start) + 1); - if (ml403_ac97cr->port == NULL) { - snd_printk(KERN_ERR SND_ML403_AC97CR_DRIVER ": " - "unable to remap memory region (%pR)\n", - resource); - snd_ml403_ac97cr_free(ml403_ac97cr); - return -EBUSY; - } - snd_printk(KERN_INFO SND_ML403_AC97CR_DRIVER ": " - "remap controller memory region to " - "0x%x done\n", (unsigned int)ml403_ac97cr->port); - /* get irq */ - irq = platform_get_irq(pfdev, 0); - if (request_irq(irq, snd_ml403_ac97cr_irq, 0, - dev_name(&pfdev->dev), (void *)ml403_ac97cr)) { - 
snd_printk(KERN_ERR SND_ML403_AC97CR_DRIVER ": " - "unable to grab IRQ %d\n", - irq); - snd_ml403_ac97cr_free(ml403_ac97cr); - return -EBUSY; - } - ml403_ac97cr->irq = irq; - snd_printk(KERN_INFO SND_ML403_AC97CR_DRIVER ": " - "request (playback) irq %d done\n", - ml403_ac97cr->irq); - irq = platform_get_irq(pfdev, 1); - if (request_irq(irq, snd_ml403_ac97cr_irq, 0, - dev_name(&pfdev->dev), (void *)ml403_ac97cr)) { - snd_printk(KERN_ERR SND_ML403_AC97CR_DRIVER ": " - "unable to grab IRQ %d\n", - irq); - snd_ml403_ac97cr_free(ml403_ac97cr); - return -EBUSY; - } - ml403_ac97cr->capture_irq = irq; - snd_printk(KERN_INFO SND_ML403_AC97CR_DRIVER ": " - "request (capture) irq %d done\n", - ml403_ac97cr->capture_irq); - - err = snd_ml403_ac97cr_chip_init(ml403_ac97cr); - if (err < 0) { - snd_ml403_ac97cr_free(ml403_ac97cr); - return err; - } - - err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, ml403_ac97cr, &ops); - if (err < 0) { - PDEBUG(INIT_FAILURE, "probe(): snd_device_new() failed!\n"); - snd_ml403_ac97cr_free(ml403_ac97cr); - return err; - } - - *rml403_ac97cr = ml403_ac97cr; - return 0; -} - -static void snd_ml403_ac97cr_mixer_free(struct snd_ac97 *ac97) -{ - struct snd_ml403_ac97cr *ml403_ac97cr = ac97->private_data; - PDEBUG(INIT_INFO, "mixer_free():\n"); - ml403_ac97cr->ac97 = NULL; - PDEBUG(INIT_INFO, "mixer_free(): (done)\n"); -} - -static int -snd_ml403_ac97cr_mixer(struct snd_ml403_ac97cr *ml403_ac97cr) -{ - struct snd_ac97_bus *bus; - struct snd_ac97_template ac97; - int err; - static const struct snd_ac97_bus_ops ops = { - .write = snd_ml403_ac97cr_codec_write, - .read = snd_ml403_ac97cr_codec_read, - }; - PDEBUG(INIT_INFO, "mixer():\n"); - err = snd_ac97_bus(ml403_ac97cr->card, 0, &ops, NULL, &bus); - if (err < 0) - return err; - - memset(&ac97, 0, sizeof(ac97)); - ml403_ac97cr->ac97_fake = 1; - lm4550_regfile_init(); -#ifdef CODEC_STAT - ml403_ac97cr->ac97_read = 0; - ml403_ac97cr->ac97_write = 0; -#endif - ac97.private_data = ml403_ac97cr; - ac97.private_free = snd_ml403_ac97cr_mixer_free; - ac97.scaps = AC97_SCAP_AUDIO | AC97_SCAP_SKIP_MODEM | - AC97_SCAP_NO_SPDIF; - err = snd_ac97_mixer(bus, &ac97, &ml403_ac97cr->ac97); - ml403_ac97cr->ac97_fake = 0; - lm4550_regfile_write_values_after_init(ml403_ac97cr->ac97); - PDEBUG(INIT_INFO, "mixer(): (done) snd_ac97_mixer()=%d\n", err); - return err; -} - -static int -snd_ml403_ac97cr_pcm(struct snd_ml403_ac97cr *ml403_ac97cr, int device) -{ - struct snd_pcm *pcm; - int err; - - err = snd_pcm_new(ml403_ac97cr->card, "ML403AC97CR/1", device, 1, 1, - &pcm); - if (err < 0) - return err; - snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, - &snd_ml403_ac97cr_playback_ops); - snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, - &snd_ml403_ac97cr_capture_ops); - pcm->private_data = ml403_ac97cr; - pcm->info_flags = 0; - strcpy(pcm->name, "ML403AC97CR DAC/ADC"); - ml403_ac97cr->pcm = pcm; - - snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, - NULL, - 64 * 1024, - 128 * 1024); - return 0; -} - -static int snd_ml403_ac97cr_probe(struct platform_device *pfdev) -{ - struct snd_card *card; - struct snd_ml403_ac97cr *ml403_ac97cr = NULL; - int err; - int dev = pfdev->id; - - if (dev >= SNDRV_CARDS) - return -ENODEV; - if (!enable[dev]) - return -ENOENT; - - err = snd_card_new(&pfdev->dev, index[dev], id[dev], THIS_MODULE, - 0, &card); - if (err < 0) - return err; - err = snd_ml403_ac97cr_create(card, pfdev, &ml403_ac97cr); - if (err < 0) { - PDEBUG(INIT_FAILURE, "probe(): create failed!\n"); - snd_card_free(card); - return err; - } - 
PDEBUG(INIT_INFO, "probe(): create done\n"); - card->private_data = ml403_ac97cr; - err = snd_ml403_ac97cr_mixer(ml403_ac97cr); - if (err < 0) { - snd_card_free(card); - return err; - } - PDEBUG(INIT_INFO, "probe(): mixer done\n"); - err = snd_ml403_ac97cr_pcm(ml403_ac97cr, 0); - if (err < 0) { - snd_card_free(card); - return err; - } - PDEBUG(INIT_INFO, "probe(): PCM done\n"); - strcpy(card->driver, SND_ML403_AC97CR_DRIVER); - strcpy(card->shortname, "ML403 AC97 Controller Reference"); - sprintf(card->longname, "%s %s at 0x%lx, irq %i & %i, device %i", - card->shortname, card->driver, - (unsigned long)ml403_ac97cr->port, ml403_ac97cr->irq, - ml403_ac97cr->capture_irq, dev + 1); - - err = snd_card_register(card); - if (err < 0) { - snd_card_free(card); - return err; - } - platform_set_drvdata(pfdev, card); - PDEBUG(INIT_INFO, "probe(): (done)\n"); - return 0; -} - -static int snd_ml403_ac97cr_remove(struct platform_device *pfdev) -{ - snd_card_free(platform_get_drvdata(pfdev)); - return 0; -} - -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:" SND_ML403_AC97CR_DRIVER); - -static struct platform_driver snd_ml403_ac97cr_driver = { - .probe = snd_ml403_ac97cr_probe, - .remove = snd_ml403_ac97cr_remove, - .driver = { - .name = SND_ML403_AC97CR_DRIVER, - }, -}; - -module_platform_driver(snd_ml403_ac97cr_driver); diff --git a/sound/drivers/pcm-indirect2.c b/sound/drivers/pcm-indirect2.c deleted file mode 100644 index 4c491d0ff071..000000000000 --- a/sound/drivers/pcm-indirect2.c +++ /dev/null @@ -1,560 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Helper functions for indirect PCM data transfer to a simple FIFO in - * hardware (small, no possibility to read "hardware io position", - * updating position done by interrupt, ...) 
- * - * Copyright (c) by 2007 Joachim Foerster - * - * Based on "pcm-indirect.h" (alsa-driver-1.0.13) by - * - * Copyright (c) by Takashi Iwai - * Jaroslav Kysela - */ - -/* snd_printk/d() */ -#include -/* struct snd_pcm_substream, struct snd_pcm_runtime, snd_pcm_uframes_t - * snd_pcm_period_elapsed() */ -#include - -#include "pcm-indirect2.h" - -#ifdef SND_PCM_INDIRECT2_STAT -/* jiffies */ -#include - -void snd_pcm_indirect2_stat(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec) -{ - struct snd_pcm_runtime *runtime = substream->runtime; - int i; - int j; - int k; - int seconds = (rec->lastbytetime - rec->firstbytetime) / HZ; - - snd_printk(KERN_DEBUG "STAT: mul_elapsed: %u, mul_elapsed_real: %d, " - "irq_occurred: %d\n", - rec->mul_elapsed, rec->mul_elapsed_real, rec->irq_occured); - snd_printk(KERN_DEBUG "STAT: min_multiple: %d (irqs/period)\n", - rec->min_multiple); - snd_printk(KERN_DEBUG "STAT: firstbytetime: %lu, lastbytetime: %lu, " - "firstzerotime: %lu\n", - rec->firstbytetime, rec->lastbytetime, rec->firstzerotime); - snd_printk(KERN_DEBUG "STAT: bytes2hw: %u Bytes => (by runtime->rate) " - "length: %d s\n", - rec->bytes2hw, rec->bytes2hw / 2 / 2 / runtime->rate); - snd_printk(KERN_DEBUG "STAT: (by measurement) length: %d => " - "rate: %d Bytes/s = %d Frames/s|Hz\n", - seconds, rec->bytes2hw / seconds, - rec->bytes2hw / 2 / 2 / seconds); - snd_printk(KERN_DEBUG - "STAT: zeros2hw: %u = %d ms ~ %d * %d zero copies\n", - rec->zeros2hw, ((rec->zeros2hw / 2 / 2) * 1000) / - runtime->rate, - rec->zeros2hw / (rec->hw_buffer_size / 2), - (rec->hw_buffer_size / 2)); - snd_printk(KERN_DEBUG "STAT: pointer_calls: %u, lastdifftime: %u\n", - rec->pointer_calls, rec->lastdifftime); - snd_printk(KERN_DEBUG "STAT: sw_io: %d, sw_data: %d\n", rec->sw_io, - rec->sw_data); - snd_printk(KERN_DEBUG "STAT: byte_sizes[]:\n"); - k = 0; - for (j = 0; j < 8; j++) { - for (i = j * 8; i < (j + 1) * 8; i++) - if (rec->byte_sizes[i] != 0) { - snd_printk(KERN_DEBUG "%u: %u", - i, rec->byte_sizes[i]); - k++; - } - if (((k % 8) == 0) && (k != 0)) { - snd_printk(KERN_DEBUG "\n"); - k = 0; - } - } - snd_printk(KERN_DEBUG "\n"); - snd_printk(KERN_DEBUG "STAT: zero_sizes[]:\n"); - for (j = 0; j < 8; j++) { - k = 0; - for (i = j * 8; i < (j + 1) * 8; i++) - if (rec->zero_sizes[i] != 0) - snd_printk(KERN_DEBUG "%u: %u", - i, rec->zero_sizes[i]); - else - k++; - if (!k) - snd_printk(KERN_DEBUG "\n"); - } - snd_printk(KERN_DEBUG "\n"); - snd_printk(KERN_DEBUG "STAT: min_adds[]:\n"); - for (j = 0; j < 8; j++) { - if (rec->min_adds[j] != 0) - snd_printk(KERN_DEBUG "%u: %u", j, rec->min_adds[j]); - } - snd_printk(KERN_DEBUG "\n"); - snd_printk(KERN_DEBUG "STAT: mul_adds[]:\n"); - for (j = 0; j < 8; j++) { - if (rec->mul_adds[j] != 0) - snd_printk(KERN_DEBUG "%u: %u", j, rec->mul_adds[j]); - } - snd_printk(KERN_DEBUG "\n"); - snd_printk(KERN_DEBUG - "STAT: zero_times_saved: %d, zero_times_notsaved: %d\n", - rec->zero_times_saved, rec->zero_times_notsaved); - /* snd_printk(KERN_DEBUG "STAT: zero_times[]\n"); - i = 0; - for (j = 0; j < 3750; j++) { - if (rec->zero_times[j] != 0) { - snd_printk(KERN_DEBUG "%u: %u", j, rec->zero_times[j]); - i++; - } - if (((i % 8) == 0) && (i != 0)) - snd_printk(KERN_DEBUG "\n"); - } - snd_printk(KERN_DEBUG "\n"); */ - return; -} -#endif - -/* - * _internal_ helper function for playback/capture transfer function - */ -static void -snd_pcm_indirect2_increase_min_periods(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec, - int isplay, int iscopy, - unsigned 
int bytes) -{ - if (rec->min_periods >= 0) { - if (iscopy) { - rec->sw_io += bytes; - if (rec->sw_io >= rec->sw_buffer_size) - rec->sw_io -= rec->sw_buffer_size; - } else if (isplay) { - /* If application does not write data in multiples of - * a period, move sw_data to the next correctly aligned - * position, so that sw_io can converge to it (in the - * next step). - */ - if (!rec->check_alignment) { - if (rec->bytes2hw % - snd_pcm_lib_period_bytes(substream)) { - unsigned bytes2hw_aligned = - (1 + - (rec->bytes2hw / - snd_pcm_lib_period_bytes - (substream))) * - snd_pcm_lib_period_bytes - (substream); - rec->sw_data = - bytes2hw_aligned % - rec->sw_buffer_size; -#ifdef SND_PCM_INDIRECT2_STAT - snd_printk(KERN_DEBUG - "STAT: @re-align: aligned " - "bytes2hw to next period " - "size boundary: %d " - "(instead of %d)\n", - bytes2hw_aligned, - rec->bytes2hw); - snd_printk(KERN_DEBUG - "STAT: @re-align: sw_data " - "moves to: %d\n", - rec->sw_data); -#endif - } - rec->check_alignment = 1; - } - /* We are at the end and are copying zeros into the - * fifo. - * Now, we have to make sure that sw_io is increased - * until the position of sw_data: Filling the fifo with - * the first zeros means, the last bytes were played. - */ - if (rec->sw_io != rec->sw_data) { - unsigned int diff; - if (rec->sw_data > rec->sw_io) - diff = rec->sw_data - rec->sw_io; - else - diff = (rec->sw_buffer_size - - rec->sw_io) + - rec->sw_data; - if (bytes >= diff) - rec->sw_io = rec->sw_data; - else { - rec->sw_io += bytes; - if (rec->sw_io >= rec->sw_buffer_size) - rec->sw_io -= - rec->sw_buffer_size; - } - } - } - rec->min_period_count += bytes; - if (rec->min_period_count >= (rec->hw_buffer_size / 2)) { - rec->min_periods += (rec->min_period_count / - (rec->hw_buffer_size / 2)); -#ifdef SND_PCM_INDIRECT2_STAT - if ((rec->min_period_count / - (rec->hw_buffer_size / 2)) > 7) - snd_printk(KERN_DEBUG - "STAT: more than 7 (%d) min_adds " - "at once - too big to save!\n", - (rec->min_period_count / - (rec->hw_buffer_size / 2))); - else - rec->min_adds[(rec->min_period_count / - (rec->hw_buffer_size / 2))]++; -#endif - rec->min_period_count = (rec->min_period_count % - (rec->hw_buffer_size / 2)); - } - } else if (isplay && iscopy) - rec->min_periods = 0; -} - -/* - * helper function for playback/capture pointer callback - */ -snd_pcm_uframes_t -snd_pcm_indirect2_pointer(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec) -{ -#ifdef SND_PCM_INDIRECT2_STAT - rec->pointer_calls++; -#endif - return bytes_to_frames(substream->runtime, rec->sw_io); -} - -/* - * _internal_ helper function for playback interrupt callback - */ -static void -snd_pcm_indirect2_playback_transfer(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec, - snd_pcm_indirect2_copy_t copy, - snd_pcm_indirect2_zero_t zero) -{ - struct snd_pcm_runtime *runtime = substream->runtime; - snd_pcm_uframes_t appl_ptr = runtime->control->appl_ptr; - - /* runtime->control->appl_ptr: position where ALSA will write next time - * rec->appl_ptr: position where ALSA was last time - * diff: obviously ALSA wrote that much bytes into the intermediate - * buffer since we checked last time - */ - snd_pcm_sframes_t diff = appl_ptr - rec->appl_ptr; - - if (diff) { -#ifdef SND_PCM_INDIRECT2_STAT - rec->lastdifftime = jiffies; -#endif - if (diff < -(snd_pcm_sframes_t) (runtime->boundary / 2)) - diff += runtime->boundary; - /* number of bytes "added" by ALSA increases the number of - * bytes which are ready to "be transferred to HW"/"played" - * Then, 
set rec->appl_ptr to not count bytes twice next time. - */ - rec->sw_ready += (int)frames_to_bytes(runtime, diff); - rec->appl_ptr = appl_ptr; - } - if (rec->hw_ready && (rec->sw_ready <= 0)) { - unsigned int bytes; - -#ifdef SND_PCM_INDIRECT2_STAT - if (rec->firstzerotime == 0) { - rec->firstzerotime = jiffies; - snd_printk(KERN_DEBUG - "STAT: @firstzerotime: mul_elapsed: %d, " - "min_period_count: %d\n", - rec->mul_elapsed, rec->min_period_count); - snd_printk(KERN_DEBUG - "STAT: @firstzerotime: sw_io: %d, " - "sw_data: %d, appl_ptr: %u\n", - rec->sw_io, rec->sw_data, - (unsigned int)appl_ptr); - } - if ((jiffies - rec->firstzerotime) < 3750) { - rec->zero_times[(jiffies - rec->firstzerotime)]++; - rec->zero_times_saved++; - } else - rec->zero_times_notsaved++; -#endif - bytes = zero(substream, rec); - -#ifdef SND_PCM_INDIRECT2_STAT - rec->zeros2hw += bytes; - if (bytes < 64) - rec->zero_sizes[bytes]++; - else - snd_printk(KERN_DEBUG - "STAT: %d zero Bytes copied to hardware at " - "once - too big to save!\n", - bytes); -#endif - snd_pcm_indirect2_increase_min_periods(substream, rec, 1, 0, - bytes); - return; - } - while (rec->hw_ready && (rec->sw_ready > 0)) { - /* sw_to_end: max. number of bytes that can be read/take from - * the current position (sw_data) in _one_ step - */ - unsigned int sw_to_end = rec->sw_buffer_size - rec->sw_data; - - /* bytes: number of bytes we have available (for reading) */ - unsigned int bytes = rec->sw_ready; - - if (sw_to_end < bytes) - bytes = sw_to_end; - if (!bytes) - break; - -#ifdef SND_PCM_INDIRECT2_STAT - if (rec->firstbytetime == 0) - rec->firstbytetime = jiffies; - rec->lastbytetime = jiffies; -#endif - /* copy bytes from intermediate buffer position sw_data to the - * HW and return number of bytes actually written - * Furthermore, set hw_ready to 0, if the fifo isn't empty - * now => more could be transferred to fifo - */ - bytes = copy(substream, rec, bytes); - rec->bytes2hw += bytes; - -#ifdef SND_PCM_INDIRECT2_STAT - if (bytes < 64) - rec->byte_sizes[bytes]++; - else - snd_printk(KERN_DEBUG - "STAT: %d Bytes copied to hardware at once " - "- too big to save!\n", - bytes); -#endif - /* increase sw_data by the number of actually written bytes - * (= number of taken bytes from intermediate buffer) - */ - rec->sw_data += bytes; - if (rec->sw_data == rec->sw_buffer_size) - rec->sw_data = 0; - /* now sw_data is the position where ALSA is going to write - * in the intermediate buffer next time = position we are going - * to read from next time - */ - - snd_pcm_indirect2_increase_min_periods(substream, rec, 1, 1, - bytes); - - /* we read bytes from intermediate buffer, so we need to say - * that the number of bytes ready for transfer are decreased - * now - */ - rec->sw_ready -= bytes; - } - return; -} - -/* - * helper function for playback interrupt routine - */ -void -snd_pcm_indirect2_playback_interrupt(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec, - snd_pcm_indirect2_copy_t copy, - snd_pcm_indirect2_zero_t zero) -{ -#ifdef SND_PCM_INDIRECT2_STAT - rec->irq_occured++; -#endif - /* hardware played some bytes, so there is room again (in fifo) */ - rec->hw_ready = 1; - - /* don't call ack() now, instead call transfer() function directly - * (normally called by ack() ) - */ - snd_pcm_indirect2_playback_transfer(substream, rec, copy, zero); - - if (rec->min_periods >= rec->min_multiple) { -#ifdef SND_PCM_INDIRECT2_STAT - if ((rec->min_periods / rec->min_multiple) > 7) - snd_printk(KERN_DEBUG - "STAT: more than 7 (%d) mul_adds - 
too big " - "to save!\n", - (rec->min_periods / rec->min_multiple)); - else - rec->mul_adds[(rec->min_periods / - rec->min_multiple)]++; - rec->mul_elapsed_real += (rec->min_periods / - rec->min_multiple); - rec->mul_elapsed++; -#endif - rec->min_periods = (rec->min_periods % rec->min_multiple); - snd_pcm_period_elapsed(substream); - } -} - -/* - * _internal_ helper function for capture interrupt callback - */ -static void -snd_pcm_indirect2_capture_transfer(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec, - snd_pcm_indirect2_copy_t copy, - snd_pcm_indirect2_zero_t null) -{ - struct snd_pcm_runtime *runtime = substream->runtime; - snd_pcm_uframes_t appl_ptr = runtime->control->appl_ptr; - snd_pcm_sframes_t diff = appl_ptr - rec->appl_ptr; - - if (diff) { -#ifdef SND_PCM_INDIRECT2_STAT - rec->lastdifftime = jiffies; -#endif - if (diff < -(snd_pcm_sframes_t) (runtime->boundary / 2)) - diff += runtime->boundary; - rec->sw_ready -= frames_to_bytes(runtime, diff); - rec->appl_ptr = appl_ptr; - } - /* if hardware has something, but the intermediate buffer is full - * => skip contents of buffer - */ - if (rec->hw_ready && (rec->sw_ready >= (int)rec->sw_buffer_size)) { - unsigned int bytes; - -#ifdef SND_PCM_INDIRECT2_STAT - if (rec->firstzerotime == 0) { - rec->firstzerotime = jiffies; - snd_printk(KERN_DEBUG "STAT: (capture) " - "@firstzerotime: mul_elapsed: %d, " - "min_period_count: %d\n", - rec->mul_elapsed, rec->min_period_count); - snd_printk(KERN_DEBUG "STAT: (capture) " - "@firstzerotime: sw_io: %d, sw_data: %d, " - "appl_ptr: %u\n", - rec->sw_io, rec->sw_data, - (unsigned int)appl_ptr); - } - if ((jiffies - rec->firstzerotime) < 3750) { - rec->zero_times[(jiffies - rec->firstzerotime)]++; - rec->zero_times_saved++; - } else - rec->zero_times_notsaved++; -#endif - bytes = null(substream, rec); - -#ifdef SND_PCM_INDIRECT2_STAT - rec->zeros2hw += bytes; - if (bytes < 64) - rec->zero_sizes[bytes]++; - else - snd_printk(KERN_DEBUG - "STAT: (capture) %d zero Bytes copied to " - "hardware at once - too big to save!\n", - bytes); -#endif - snd_pcm_indirect2_increase_min_periods(substream, rec, 0, 0, - bytes); - /* report an overrun */ - rec->sw_io = SNDRV_PCM_POS_XRUN; - return; - } - while (rec->hw_ready && (rec->sw_ready < (int)rec->sw_buffer_size)) { - /* sw_to_end: max. number of bytes that we can write to the - * intermediate buffer (until it's end) - */ - size_t sw_to_end = rec->sw_buffer_size - rec->sw_data; - - /* bytes: max. number of bytes, which may be copied to the - * intermediate buffer without overflow (in _one_ step) - */ - size_t bytes = rec->sw_buffer_size - rec->sw_ready; - - /* limit number of bytes (for transfer) by available room in - * the intermediate buffer - */ - if (sw_to_end < bytes) - bytes = sw_to_end; - if (!bytes) - break; - -#ifdef SND_PCM_INDIRECT2_STAT - if (rec->firstbytetime == 0) - rec->firstbytetime = jiffies; - rec->lastbytetime = jiffies; -#endif - /* copy bytes from the intermediate buffer (position sw_data) - * to the HW at most and return number of bytes actually copied - * from HW - * Furthermore, set hw_ready to 0, if the fifo is empty now. 
- */ - bytes = copy(substream, rec, bytes); - rec->bytes2hw += bytes; - -#ifdef SND_PCM_INDIRECT2_STAT - if (bytes < 64) - rec->byte_sizes[bytes]++; - else - snd_printk(KERN_DEBUG - "STAT: (capture) %d Bytes copied to " - "hardware at once - too big to save!\n", - bytes); -#endif - /* increase sw_data by the number of actually copied bytes from - * HW - */ - rec->sw_data += bytes; - if (rec->sw_data == rec->sw_buffer_size) - rec->sw_data = 0; - - snd_pcm_indirect2_increase_min_periods(substream, rec, 0, 1, - bytes); - - /* number of bytes in the intermediate buffer, which haven't - * been fetched by ALSA yet. - */ - rec->sw_ready += bytes; - } - return; -} - -/* - * helper function for capture interrupt routine - */ -void -snd_pcm_indirect2_capture_interrupt(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec, - snd_pcm_indirect2_copy_t copy, - snd_pcm_indirect2_zero_t null) -{ -#ifdef SND_PCM_INDIRECT2_STAT - rec->irq_occured++; -#endif - /* hardware recorded some bytes, so there is something to read from the - * record fifo: - */ - rec->hw_ready = 1; - - /* don't call ack() now, instead call transfer() function directly - * (normally called by ack() ) - */ - snd_pcm_indirect2_capture_transfer(substream, rec, copy, null); - - if (rec->min_periods >= rec->min_multiple) { - -#ifdef SND_PCM_INDIRECT2_STAT - if ((rec->min_periods / rec->min_multiple) > 7) - snd_printk(KERN_DEBUG - "STAT: more than 7 (%d) mul_adds - " - "too big to save!\n", - (rec->min_periods / rec->min_multiple)); - else - rec->mul_adds[(rec->min_periods / - rec->min_multiple)]++; - rec->mul_elapsed_real += (rec->min_periods / - rec->min_multiple); - rec->mul_elapsed++; -#endif - rec->min_periods = (rec->min_periods % rec->min_multiple); - snd_pcm_period_elapsed(substream); - } -} diff --git a/sound/drivers/pcm-indirect2.h b/sound/drivers/pcm-indirect2.h deleted file mode 100644 index 355ce76d2403..000000000000 --- a/sound/drivers/pcm-indirect2.h +++ /dev/null @@ -1,127 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Helper functions for indirect PCM data transfer to a simple FIFO in - * hardware (small, no possibility to read "hardware io position", - * updating position done by interrupt, ...) - * - * Copyright (c) by 2007 Joachim Foerster - * - * Based on "pcm-indirect.h" (alsa-driver-1.0.13) by - * - * Copyright (c) by Takashi Iwai - * Jaroslav Kysela - */ - -#ifndef __SOUND_PCM_INDIRECT2_H -#define __SOUND_PCM_INDIRECT2_H - -/* struct snd_pcm_substream, struct snd_pcm_runtime, snd_pcm_uframes_t */ -#include - -/* Debug options for code which may be removed completely in a final version */ -#ifdef CONFIG_SND_DEBUG -#define SND_PCM_INDIRECT2_STAT /* turn on some "statistics" about the - * process of copying bytes from the - * intermediate buffer to the hardware - * fifo and the other way round - */ -#endif - -struct snd_pcm_indirect2 { - unsigned int hw_buffer_size; /* Byte size of hardware buffer */ - int hw_ready; /* playback: 1 = hw fifo has room left, - * 0 = hw fifo is full - */ - unsigned int min_multiple; - int min_periods; /* counts number of min. periods until - * min_multiple is reached - */ - int min_period_count; /* counts bytes to count number of - * min. 
periods - */ - - unsigned int sw_buffer_size; /* Byte size of software buffer */ - - /* sw_data: position in intermediate buffer, where we will read (or - * write) from/to next time (to transfer data to/from HW) - */ - unsigned int sw_data; /* Offset to next dst (or src) in sw - * ring buffer - */ - /* easiest case (playback): - * sw_data is nearly the same as ~ runtime->control->appl_ptr, with the - * exception that sw_data is "behind" by the number if bytes ALSA wrote - * to the intermediate buffer last time. - * A call to ack() callback synchronizes both indirectly. - */ - - /* We have no real sw_io pointer here. Usually sw_io is pointing to the - * current playback/capture position _inside_ the hardware. Devices - * with plain FIFOs often have no possibility to publish this position. - * So we say: if sw_data is updated, that means bytes were copied to - * the hardware, we increase sw_io by that amount, because there have - * to be as much bytes which were played. So sw_io will stay behind - * sw_data all the time and has to converge to sw_data at the end of - * playback. - */ - unsigned int sw_io; /* Current software pointer in bytes */ - - /* sw_ready: number of bytes ALSA copied to the intermediate buffer, so - * it represents the number of bytes which wait for transfer to the HW - */ - int sw_ready; /* Bytes ready to be transferred to/from hw */ - - /* appl_ptr: last known position of ALSA (where ALSA is going to write - * next time into the intermediate buffer - */ - snd_pcm_uframes_t appl_ptr; /* Last seen appl_ptr */ - - unsigned int bytes2hw; - int check_alignment; - -#ifdef SND_PCM_INDIRECT2_STAT - unsigned int zeros2hw; - unsigned int mul_elapsed; - unsigned int mul_elapsed_real; - unsigned long firstbytetime; - unsigned long lastbytetime; - unsigned long firstzerotime; - unsigned int byte_sizes[64]; - unsigned int zero_sizes[64]; - unsigned int min_adds[8]; - unsigned int mul_adds[8]; - unsigned int zero_times[3750]; /* = 15s */ - unsigned int zero_times_saved; - unsigned int zero_times_notsaved; - unsigned int irq_occured; - unsigned int pointer_calls; - unsigned int lastdifftime; -#endif -}; - -typedef size_t (*snd_pcm_indirect2_copy_t) (struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec, - size_t bytes); -typedef size_t (*snd_pcm_indirect2_zero_t) (struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec); - -#ifdef SND_PCM_INDIRECT2_STAT -void snd_pcm_indirect2_stat(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec); -#endif - -snd_pcm_uframes_t -snd_pcm_indirect2_pointer(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec); -void -snd_pcm_indirect2_playback_interrupt(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec, - snd_pcm_indirect2_copy_t copy, - snd_pcm_indirect2_zero_t zero); -void -snd_pcm_indirect2_capture_interrupt(struct snd_pcm_substream *substream, - struct snd_pcm_indirect2 *rec, - snd_pcm_indirect2_copy_t copy, - snd_pcm_indirect2_zero_t null); - -#endif /* __SOUND_PCM_INDIRECT2_H */ -- cgit v1.2.3-59-g8ed1b From 2c74e2586bb96012ffc05f1c819b05d9cad86d6e Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 May 2020 16:55:53 +0000 Subject: powerpc/40x: Rework 40x PTE access and TLB miss Commit 1bc54c03117b ("powerpc: rework 4xx PTE access and TLB miss") reworked 44x PTE access to avoid atomic pte updates, and left 8xx, 40x and fsl booke with atomic pte updates. 
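For reference, here is roughly what the two flavours of PTE update boil down to. Without PTE_ATOMIC_UPDATES, pte_update() is a plain read-modify-write — a simplified sketch of the generic nohash version (64-bit PTE and hugepage details elided, matching the code kept by the PTE_ATOMIC_UPDATES removal later in this series):

	pte_basic_t old = pte_val(*p);				/* read current PTE */
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;	/* clear/set bits */

	*p = __pte(new);					/* plain store back */

With PTE_ATOMIC_UPDATES, the same read-modify-write has to be wrapped in an lwarx/stwcx. retry loop, because the exception handlers may concurrently set bits such as _PAGE_ACCESSED or _PAGE_HWWRITE in the PTE from TLB miss context.
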
Commit 6cfd8990e27d ("powerpc: rework FSL Book-E PTE access and TLB miss") removed atomic pte updates on fsl booke. It went away on 8xx with commit ddfc20a3b9ae ("powerpc/8xx: Remove PTE_ATOMIC_UPDATES"). 40x is the last platform setting PTE_ATOMIC_UPDATES. Rework PTE access and TLB miss to remove PTE_ATOMIC_UPDATES for 40x: - Always handle DSI as a fault. - Bail out of TLB miss handler when CONFIG_SWAP is set and _PAGE_ACCESSED is not set. - Bail out of ITLB miss handler when _PAGE_EXEC is not set. - Only set WR bit when both _PAGE_RW and _PAGE_DIRTY are set. - Remove _PAGE_HWWRITE - Don't require PTE_ATOMIC_UPDATES anymore Reported-by: kbuild test robot Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/99a0fcd337ef67088140d1647d75fea026a70413.1590079968.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/pte-40x.h | 23 +--- arch/powerpc/include/asm/nohash/pgtable.h | 2 - arch/powerpc/kernel/head_40x.S | 177 +++++---------------------- arch/powerpc/mm/nohash/40x.c | 4 +- 4 files changed, 34 insertions(+), 172 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h index 12c6811e344b..2d3153cfc0d7 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-40x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h @@ -44,9 +44,8 @@ #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ #define _PAGE_USER 0x010 /* matches one of the zone permission bits */ #define _PAGE_SPECIAL 0x020 /* software: Special page */ -#define _PAGE_RW 0x040 /* software: Writes permitted */ #define _PAGE_DIRTY 0x080 /* software: dirty page */ -#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */ +#define _PAGE_RW 0x100 /* hardware: WR, anded with dirty in exception */ #define _PAGE_EXEC 0x200 /* hardware: EX permission */ #define _PAGE_ACCESSED 0x400 /* software: R: page referenced */ @@ -58,8 +57,8 @@ #define _PAGE_KERNEL_RO 0 #define _PAGE_KERNEL_ROX _PAGE_EXEC -#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE) -#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC) +#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW) +#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) #define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */ #define _PMD_PRESENT_MASK _PMD_PRESENT @@ -85,21 +84,5 @@ #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) -#ifndef __ASSEMBLY__ -static inline pte_t pte_wrprotect(pte_t pte) -{ - return __pte(pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE)); -} - -#define pte_wrprotect pte_wrprotect - -static inline pte_t pte_mkclean(pte_t pte) -{ - return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE)); -} - -#define pte_mkclean pte_mkclean -#endif - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */ diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index f27c967d9269..50a4b0bb8d16 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -130,12 +130,10 @@ static inline pte_t pte_exprotect(pte_t pte) return __pte(pte_val(pte) & ~_PAGE_EXEC); } -#ifndef pte_mkclean static inline pte_t pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~_PAGE_DIRTY); } -#endif static inline pte_t pte_mkold(pte_t pte) { diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 2cec543c38f0..f45d71ada48b 100644 --- 
a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -176,135 +176,16 @@ _ENTRY(saved_ksp_limit) * 0x0300 - Data Storage Exception * This happens for just a few reasons. U0 set (but we don't do that), * or zone protection fault (user violation, write to protected page). - * If this is just an update of modified status, we do that quickly - * and exit. Otherwise, we call heavywight functions to do the work. + * The other Data TLB exceptions bail out to this point + * if they can't resolve the lightweight TLB fault. */ START_EXCEPTION(0x0300, DataStorage) - mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */ - mtspr SPRN_SPRG_SCRATCH1, r11 -#ifdef CONFIG_403GCX - stw r12, 0(r0) - stw r9, 4(r0) - mfcr r11 - mfspr r12, SPRN_PID - stw r11, 8(r0) - stw r12, 12(r0) -#else - mtspr SPRN_SPRG_SCRATCH3, r12 - mtspr SPRN_SPRG_SCRATCH4, r9 - mfcr r11 - mfspr r12, SPRN_PID - mtspr SPRN_SPRG_SCRATCH6, r11 - mtspr SPRN_SPRG_SCRATCH5, r12 -#endif - - /* First, check if it was a zone fault (which means a user - * tried to access a kernel or read-protected page - always - * a SEGV). All other faults here must be stores, so no - * need to check ESR_DST as well. */ - mfspr r10, SPRN_ESR - andis. r10, r10, ESR_DIZ@h - bne 2f - - mfspr r10, SPRN_DEAR /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, PAGE_OFFSET@h - cmplw r10, r11 - blt+ 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - li r9, 0 - mtspr SPRN_PID, r9 /* TLB will have 0 TID */ - b 4f - - /* Get the PGD for the current thread. - */ -3: - mfspr r11,SPRN_SPRG_THREAD - lwz r11,PGDIR(r11) -4: - tophys(r11, r11) - rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ - lwz r11, 0(r11) /* Get L1 entry */ - rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ - beq 2f /* Bail if no table */ - - rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ - lwz r11, 0(r12) /* Get Linux PTE */ - - andi. r9, r11, _PAGE_RW /* Is it writeable? */ - beq 2f /* Bail if not */ - - /* Update 'changed'. - */ - ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE - stw r11, 0(r12) /* Update Linux page table */ - - /* Most of the Linux PTE is ready to load into the TLB LO. - * We set ZSEL, where only the LS-bit determines user access. - * We set execute, because we don't have the granularity to - * properly set this at the page level (Linux problem). - * If shared is set, we cause a zero PID->TID load. - * Many of these bits are software only. Bits we don't set - * here we (properly should) assume have the appropriate value. - */ - li r12, 0x0ce2 - andc r11, r11, r12 /* Make sure 20, 21 are zero */ - - /* find the TLB index that caused the fault. It has to be here. - */ - tlbsx r9, 0, r10 - - tlbwe r11, r9, TLB_DATA /* Load TLB LO */ - - /* Done...restore registers and get out of here. - */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else - mfspr r12, SPRN_SPRG_SCRATCH5 - mfspr r11, SPRN_SPRG_SCRATCH6 - mtspr SPRN_PID, r12 - mtcr r11 - mfspr r9, SPRN_SPRG_SCRATCH4 - mfspr r12, SPRN_SPRG_SCRATCH3 -#endif - mfspr r11, SPRN_SPRG_SCRATCH1 - mfspr r10, SPRN_SPRG_SCRATCH0 - PPC405_ERR77_SYNC - rfi /* Should sync shadow TLBs */ - b . /* prevent prefetch past rfi */ - -2: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. 
- */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else - mfspr r12, SPRN_SPRG_SCRATCH5 - mfspr r11, SPRN_SPRG_SCRATCH6 - mtspr SPRN_PID, r12 - mtcr r11 - mfspr r9, SPRN_SPRG_SCRATCH4 - mfspr r12, SPRN_SPRG_SCRATCH3 -#endif - mfspr r11, SPRN_SPRG_SCRATCH1 - mfspr r10, SPRN_SPRG_SCRATCH0 - b DataAccess + EXCEPTION_PROLOG + mfspr r5, SPRN_ESR /* Grab the ESR, save it, pass arg3 */ + stw r5, _ESR(r11) + mfspr r4, SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ + stw r4, _DEAR(r11) + EXC_XFER_LITE(0x300, handle_page_fault) /* * 0x0400 - Instruction Storage Exception @@ -415,11 +296,17 @@ _ENTRY(saved_ksp_limit) rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ lwz r11, 0(r12) /* Get Linux PTE */ - andi. r9, r11, _PAGE_PRESENT - beq 5f +#ifdef CONFIG_SWAP + li r9, _PAGE_PRESENT | _PAGE_ACCESSED +#else + li r9, _PAGE_PRESENT +#endif + andc. r9, r9, r11 /* Check permission */ + bne 5f - ori r11, r11, _PAGE_ACCESSED - stw r11, 0(r12) + rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */ + and r9, r9, r11 /* hwwrite = dirty & rw */ + rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */ /* Create TLB tag. This is the faulting address plus a static * set of bits. These are size, valid, E, U0. @@ -463,7 +350,7 @@ _ENTRY(saved_ksp_limit) #endif mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r10, SPRN_SPRG_SCRATCH0 - b DataAccess + b DataStorage /* 0x1200 - Instruction TLB Miss Exception * Nearly the same as above, except we get our information from different @@ -515,11 +402,17 @@ _ENTRY(saved_ksp_limit) rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ lwz r11, 0(r12) /* Get Linux PTE */ - andi. r9, r11, _PAGE_PRESENT - beq 5f +#ifdef CONFIG_SWAP + li r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC +#else + li r9, _PAGE_PRESENT | _PAGE_EXEC +#endif + andc. r9, r9, r11 /* Check permission */ + bne 5f - ori r11, r11, _PAGE_ACCESSED - stw r11, 0(r12) + rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */ + and r9, r9, r11 /* hwwrite = dirty & rw */ + rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */ /* Create TLB tag. This is the faulting address plus a static * set of bits. These are size, valid, E, U0. @@ -669,18 +562,6 @@ WDTException: (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), crit_transfer_to_handler, ret_from_crit_exc) -/* - * The other Data TLB exceptions bail out to this point - * if they can't resolve the lightweight TLB fault. - */ -DataAccess: - EXCEPTION_PROLOG - mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ - stw r5,_ESR(r11) - mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ - stw r4, _DEAR(r11) - EXC_XFER_LITE(0x300, handle_page_fault) - /* Other PowerPC processors, namely those derived from the 6xx-series * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved. 
* However, for the 4xx-series processors these are neither defined nor diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c index 82862723ab42..4eaf462cda30 100644 --- a/arch/powerpc/mm/nohash/40x.c +++ b/arch/powerpc/mm/nohash/40x.c @@ -102,7 +102,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) while (s >= LARGE_PAGE_SIZE_16M) { pmd_t *pmdp; - unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE; + unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_RW; pmdp = pmd_ptr_k(v); *pmdp++ = __pmd(val); @@ -117,7 +117,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) while (s >= LARGE_PAGE_SIZE_4M) { pmd_t *pmdp; - unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE; + unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_RW; pmdp = pmd_ptr_k(v); *pmdp = __pmd(val); -- cgit v1.2.3-59-g8ed1b From 4e1df545e2fae53e07c93b835c3dcc9d4917c849 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 May 2020 16:55:54 +0000 Subject: powerpc/pgtable: Drop PTE_ATOMIC_UPDATES 40x was the last user of PTE_ATOMIC_UPDATES. Drop everything related to PTE_ATOMIC_UPDATES. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/dbe8438fd1ed3e500132c8ab70269d4e6cc84531.1590079968.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/pgtable.h | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 717f995d21b8..46cd5428fc52 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -259,25 +259,10 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, unsigned long clr, unsigned long set, int huge) { -#if defined(PTE_ATOMIC_UPDATES) && !defined(CONFIG_PTE_64BIT) - unsigned long old, tmp; - - __asm__ __volatile__("\ -1: lwarx %0,0,%3\n\ - andc %1,%0,%4\n\ - or %1,%1,%5\n" - PPC405_ERR77(0,%3) -" stwcx. %1,0,%3\n\ - bne- 1b" - : "=&r" (old), "=&r" (tmp), "=m" (*p) - : "r" (p), "r" (clr), "r" (set), "m" (*p) - : "cc" ); -#else /* PTE_ATOMIC_UPDATES */ pte_basic_t old = pte_val(*p); pte_basic_t new = (old & ~(pte_basic_t)clr) | set; *p = __pte(new); -#endif /* !PTE_ATOMIC_UPDATES */ #ifdef CONFIG_44x if ((old & _PAGE_USER) && (old & _PAGE_EXEC)) -- cgit v1.2.3-59-g8ed1b From 1b5c0967ab8aa9424cdd5108de4e055d8aeaa9d0 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 May 2020 16:55:55 +0000 Subject: powerpc/40x: Remove support for IBM 403GCX CONFIG_403GCX is not user selectable and is not selected by any platform. Remove it. 
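Note that 403GCX is a promptless bool, so the only way it could ever be enabled is through a select from another symbol, and a quick check like "git grep -n 'select 403GCX' arch/powerpc" comes back empty: nothing selects it.
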
Signed-off-by: Christophe Leroy Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/635f8f5ce9d1f761b3bd8dc3e8ddad500cea26c4.1590079968.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/cache.h | 2 +- arch/powerpc/include/asm/reg_booke.h | 54 ------------------------------------ arch/powerpc/include/asm/time.h | 12 -------- arch/powerpc/kernel/cputable.c | 37 ------------------------ arch/powerpc/kernel/head_40x.S | 45 ------------------------------ arch/powerpc/kernel/misc_32.S | 9 ------ arch/powerpc/kernel/setup-common.c | 4 --- arch/powerpc/platforms/40x/Kconfig | 6 ---- 8 files changed, 1 insertion(+), 168 deletions(-) diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index 609cab1d58f2..2124b7090db9 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -6,7 +6,7 @@ /* bytes per L1 cache line */ -#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX) +#if defined(CONFIG_PPC_8xx) #define L1_CACHE_SHIFT 4 #define MAX_COPY_PREFETCH 1 #define IFETCH_ALIGN_SHIFT 2 diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index f26fe482fbca..ff30f1076162 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -663,60 +663,6 @@ #define EPC_EPID 0x00003fff #define EPC_EPID_SHIFT 0 -/* - * The IBM-403 is an even more odd special case, as it is much - * older than the IBM-405 series. We put these down here incase someone - * wishes to support these machines again. - */ -#ifdef CONFIG_403GCX -/* Special Purpose Registers (SPRNs)*/ -#define SPRN_TBHU 0x3CC /* Time Base High User-mode */ -#define SPRN_TBLU 0x3CD /* Time Base Low User-mode */ -#define SPRN_CDBCR 0x3D7 /* Cache Debug Control Register */ -#define SPRN_TBHI 0x3DC /* Time Base High */ -#define SPRN_TBLO 0x3DD /* Time Base Low */ -#define SPRN_DBCR 0x3F2 /* Debug Control Register */ -#define SPRN_PBL1 0x3FC /* Protection Bound Lower 1 */ -#define SPRN_PBL2 0x3FE /* Protection Bound Lower 2 */ -#define SPRN_PBU1 0x3FD /* Protection Bound Upper 1 */ -#define SPRN_PBU2 0x3FF /* Protection Bound Upper 2 */ - - -/* Bit definitions for the DBCR. */ -#define DBCR_EDM DBCR0_EDM -#define DBCR_IDM DBCR0_IDM -#define DBCR_RST(x) (((x) & 0x3) << 28) -#define DBCR_RST_NONE 0 -#define DBCR_RST_CORE 1 -#define DBCR_RST_CHIP 2 -#define DBCR_RST_SYSTEM 3 -#define DBCR_IC DBCR0_IC /* Instruction Completion Debug Evnt */ -#define DBCR_BT DBCR0_BT /* Branch Taken Debug Event */ -#define DBCR_EDE DBCR0_EDE /* Exception Debug Event */ -#define DBCR_TDE DBCR0_TDE /* TRAP Debug Event */ -#define DBCR_FER 0x00F80000 /* First Events Remaining Mask */ -#define DBCR_FT 0x00040000 /* Freeze Timers on Debug Event */ -#define DBCR_IA1 0x00020000 /* Instr. Addr. Compare 1 Enable */ -#define DBCR_IA2 0x00010000 /* Instr. Addr. Compare 2 Enable */ -#define DBCR_D1R 0x00008000 /* Data Addr. Compare 1 Read Enable */ -#define DBCR_D1W 0x00004000 /* Data Addr. Compare 1 Write Enable */ -#define DBCR_D1S(x) (((x) & 0x3) << 12) /* Data Adrr. Compare 1 Size */ -#define DAC_BYTE 0 -#define DAC_HALF 1 -#define DAC_WORD 2 -#define DAC_QUAD 3 -#define DBCR_D2R 0x00000800 /* Data Addr. Compare 2 Read Enable */ -#define DBCR_D2W 0x00000400 /* Data Addr. Compare 2 Write Enable */ -#define DBCR_D2S(x) (((x) & 0x3) << 8) /* Data Addr. 
Compare 2 Size */ -#define DBCR_SBT 0x00000040 /* Second Branch Taken Debug Event */ -#define DBCR_SED 0x00000020 /* Second Exception Debug Event */ -#define DBCR_STD 0x00000010 /* Second Trap Debug Event */ -#define DBCR_SIA 0x00000008 /* Second IAC Enable */ -#define DBCR_SDA 0x00000004 /* Second DAC Enable */ -#define DBCR_JOI 0x00000002 /* JTAG Serial Outbound Int. Enable */ -#define DBCR_JII 0x00000001 /* JTAG Serial Inbound Int. Enable */ -#endif /* 403GCX */ - /* Some 476 specific registers */ #define SPRN_SSPCR 830 #define SPRN_USPCR 831 diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index 39ce95016a3a..b287cfc2dd85 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -51,24 +51,12 @@ struct div_result { static inline unsigned long get_tbl(void) { -#if defined(CONFIG_403GCX) - unsigned long tbl; - asm volatile("mfspr %0, 0x3dd" : "=r" (tbl)); - return tbl; -#else return mftbl(); -#endif } static inline unsigned int get_tbu(void) { -#ifdef CONFIG_403GCX - unsigned int tbu; - asm volatile("mfspr %0, 0x3dc" : "=r" (tbu)); - return tbu; -#else return mftbu(); -#endif } #endif /* !CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index cae9764b929e..61bd8fb408b2 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1232,43 +1232,6 @@ static struct cpu_spec __initdata cpu_specs[] = { }, #endif /* CONFIG_PPC_8xx */ #ifdef CONFIG_40x - { /* 403GC */ - .pvr_mask = 0xffffff00, - .pvr_value = 0x00200200, - .cpu_name = "403GC", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 16, - .dcache_bsize = 16, - .machine_check = machine_check_4xx, - .platform = "ppc403", - }, - { /* 403GCX */ - .pvr_mask = 0xffffff00, - .pvr_value = 0x00201400, - .cpu_name = "403GCX", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 16, - .dcache_bsize = 16, - .machine_check = machine_check_4xx, - .platform = "ppc403", - }, - { /* 403G ?? */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00200000, - .cpu_name = "403G ??", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 16, - .dcache_bsize = 16, - .machine_check = machine_check_4xx, - .platform = "ppc403", - }, { /* 405GP */ .pvr_mask = 0xffff0000, .pvr_value = 0x40110000, diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index f45d71ada48b..5fe4b7ad864b 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -253,21 +253,12 @@ _ENTRY(saved_ksp_limit) START_EXCEPTION(0x1100, DTLBMiss) mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */ mtspr SPRN_SPRG_SCRATCH1, r11 -#ifdef CONFIG_403GCX - stw r12, 0(r0) - stw r9, 4(r0) - mfcr r11 - mfspr r12, SPRN_PID - stw r11, 8(r0) - stw r12, 12(r0) -#else mtspr SPRN_SPRG_SCRATCH3, r12 mtspr SPRN_SPRG_SCRATCH4, r9 mfcr r11 mfspr r12, SPRN_PID mtspr SPRN_SPRG_SCRATCH6, r11 mtspr SPRN_SPRG_SCRATCH5, r12 -#endif mfspr r10, SPRN_DEAR /* Get faulting address */ /* If we are faulting a kernel address, we have to use the @@ -333,21 +324,12 @@ _ENTRY(saved_ksp_limit) /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. 
*/ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else mfspr r12, SPRN_SPRG_SCRATCH5 mfspr r11, SPRN_SPRG_SCRATCH6 mtspr SPRN_PID, r12 mtcr r11 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 -#endif mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r10, SPRN_SPRG_SCRATCH0 b DataStorage @@ -359,21 +341,12 @@ _ENTRY(saved_ksp_limit) START_EXCEPTION(0x1200, ITLBMiss) mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */ mtspr SPRN_SPRG_SCRATCH1, r11 -#ifdef CONFIG_403GCX - stw r12, 0(r0) - stw r9, 4(r0) - mfcr r11 - mfspr r12, SPRN_PID - stw r11, 8(r0) - stw r12, 12(r0) -#else mtspr SPRN_SPRG_SCRATCH3, r12 mtspr SPRN_SPRG_SCRATCH4, r9 mfcr r11 mfspr r12, SPRN_PID mtspr SPRN_SPRG_SCRATCH6, r11 mtspr SPRN_SPRG_SCRATCH5, r12 -#endif mfspr r10, SPRN_SRR0 /* Get faulting address */ /* If we are faulting a kernel address, we have to use the @@ -439,21 +412,12 @@ _ENTRY(saved_ksp_limit) /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else mfspr r12, SPRN_SPRG_SCRATCH5 mfspr r11, SPRN_SPRG_SCRATCH6 mtspr SPRN_PID, r12 mtcr r11 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 -#endif mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r10, SPRN_SPRG_SCRATCH0 b InstructionAccess @@ -603,21 +567,12 @@ finish_tlb_load: /* Done...restore registers and get out of here. */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else mfspr r12, SPRN_SPRG_SCRATCH5 mfspr r11, SPRN_SPRG_SCRATCH6 mtspr SPRN_PID, r12 mtcr r11 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 -#endif mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r10, SPRN_SPRG_SCRATCH0 PPC405_ERR77_SYNC diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index d80212be8698..70bb885b14c6 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -274,17 +274,8 @@ _GLOBAL(real_writeb) #ifndef CONFIG_PPC_8xx _GLOBAL(flush_instruction_cache) #if defined(CONFIG_4xx) -#ifdef CONFIG_403GCX - li r3, 512 - mtctr r3 - lis r4, KERNELBASE@h -1: iccci 0, r4 - addi r4, r4, 16 - bdnz 1b -#else lis r3, KERNELBASE@h iccci 0,r3 -#endif #elif defined(CONFIG_FSL_BOOKE) #ifdef CONFIG_E200 mfspr r3,SPRN_L1CSR0 diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index f9c0d888ce8a..c376a0588039 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -306,10 +306,6 @@ static int show_cpuinfo(struct seq_file *m, void *v) } } else { switch (PVR_VER(pvr)) { - case 0x0020: /* 403 family */ - maj = PVR_MAJ(pvr) + 1; - min = PVR_MIN(pvr); - break; case 0x1008: /* 740P/750P ?? */ maj = ((pvr >> 8) & 0xFF) - 1; min = pvr & 0xFF; diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index d06ca51e8443..8669be59948c 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -69,12 +69,6 @@ config PPC40x_SIMPLE help This option enables the simple PowerPC 40x platform support. 
-# OAK doesn't exist but wanted to keep this around for any future 403GCX boards -config 403GCX - bool - #depends on OAK - select IBM405_ERR51 - config 405GP bool select IBM405_ERR77 -- cgit v1.2.3-59-g8ed1b From 7583b63c343c1076c89b2012fd8758473f046f5f Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 May 2020 16:55:56 +0000 Subject: powerpc/40x: Remove STB03xxx CONFIG_STB03xxx is not user selectable and is not selected by any config. Remove it. Signed-off-by: Christophe Leroy Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/d7d73f9a8ee3a890566abace568101e9b4836016.1590079968.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/cputable.c | 13 ------------- arch/powerpc/platforms/40x/Kconfig | 5 ----- 2 files changed, 18 deletions(-) diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 61bd8fb408b2..bdc4eab0daaf 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1245,19 +1245,6 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_4xx, .platform = "ppc405", }, - { /* STB 03xxx */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x40130000, - .cpu_name = "STB03xxx", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, { /* STB 04xxx */ .pvr_mask = 0xffff0000, .pvr_value = 0x41810000, diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index 8669be59948c..ca8f44650647 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -86,11 +86,6 @@ config 405EZ select IBM_EMAC_MAL_CLR_ICINTSTAT if IBM_EMAC select IBM_EMAC_MAL_COMMON_ERR if IBM_EMAC -config STB03xxx - bool - select IBM405_ERR77 - select IBM405_ERR51 - config PPC4xx_GPIO bool "PPC4xx GPIO support" depends on 40x -- cgit v1.2.3-59-g8ed1b From 5786074b96e38691a0cb3d3644ca2aa5d6d8830d Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 May 2020 16:55:57 +0000 Subject: powerpc/40x: Remove WALNUT CONFIG_WALNUT is not selected by any config and is based on 405GP which is obsolete. Remove it. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/ab46013d8d33346af68faf30a719a586c3befad9.1590079968.git.christophe.leroy@csgroup.eu --- arch/powerpc/boot/Makefile | 4 +- arch/powerpc/boot/dts/walnut.dts | 246 ---------------------------- arch/powerpc/boot/treeboot-walnut.c | 81 --------- arch/powerpc/configs/40x/acadia_defconfig | 1 - arch/powerpc/configs/40x/kilauea_defconfig | 1 - arch/powerpc/configs/40x/klondike_defconfig | 1 - arch/powerpc/configs/40x/makalu_defconfig | 1 - arch/powerpc/configs/40x/obs600_defconfig | 1 - arch/powerpc/platforms/40x/Kconfig | 10 -- arch/powerpc/platforms/40x/Makefile | 1 - arch/powerpc/platforms/40x/walnut.c | 65 -------- 11 files changed, 1 insertion(+), 411 deletions(-) delete mode 100644 arch/powerpc/boot/dts/walnut.dts delete mode 100644 arch/powerpc/boot/treeboot-walnut.c delete mode 100644 arch/powerpc/platforms/40x/walnut.c diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index d8077b7071dd..749c27fcf2d9 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -75,7 +75,6 @@ $(obj)/cuboot-hotfoot.o: BOOTCFLAGS += -mcpu=405 $(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405 $(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405 $(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405 -$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405 $(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405 $(obj)/treeboot-currituck.o: BOOTCFLAGS += -mcpu=405 $(obj)/treeboot-akebono.o: BOOTCFLAGS += -mcpu=405 @@ -132,7 +131,7 @@ src-wlib-$(CONFIG_CPM) += cpm-serial.c src-plat-y := of.c epapr.c src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \ - treeboot-walnut.c cuboot-acadia.c \ + cuboot-acadia.c \ cuboot-kilauea.c simpleboot.c src-plat-$(CONFIG_44x) += treeboot-ebony.c cuboot-ebony.c treeboot-bamboo.c \ cuboot-bamboo.c cuboot-sam440ep.c \ @@ -278,7 +277,6 @@ image-$(CONFIG_EPAPR_BOOT) += zImage.epapr # Board ports in arch/powerpc/platform/40x/Kconfig image-$(CONFIG_EP405) += dtbImage.ep405 image-$(CONFIG_HOTFOOT) += cuImage.hotfoot -image-$(CONFIG_WALNUT) += treeImage.walnut image-$(CONFIG_ACADIA) += cuImage.acadia image-$(CONFIG_OBS600) += uImage.obs600 diff --git a/arch/powerpc/boot/dts/walnut.dts b/arch/powerpc/boot/dts/walnut.dts deleted file mode 100644 index 0872862c9363..000000000000 --- a/arch/powerpc/boot/dts/walnut.dts +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Device Tree Source for IBM Walnut - * - * Copyright 2007 IBM Corp. - * Josh Boyer - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without - * any warranty of any kind, whether express or implied. 
- */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - model = "ibm,walnut"; - compatible = "ibm,walnut"; - dcr-parent = <&{/cpus/cpu@0}>; - - aliases { - ethernet0 = &EMAC; - serial0 = &UART0; - serial1 = &UART1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - model = "PowerPC,405GP"; - reg = <0x00000000>; - clock-frequency = <200000000>; /* Filled in by zImage */ - timebase-frequency = <0>; /* Filled in by zImage */ - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <16384>; - d-cache-size = <16384>; - dcr-controller; - dcr-access-method = "native"; - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x00000000>; /* Filled in by zImage */ - }; - - UIC0: interrupt-controller { - compatible = "ibm,uic"; - interrupt-controller; - cell-index = <0>; - dcr-reg = <0x0c0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - }; - - plb { - compatible = "ibm,plb3"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - clock-frequency = <0>; /* Filled in by zImage */ - - SDRAM0: memory-controller { - compatible = "ibm,sdram-405gp"; - dcr-reg = <0x010 0x002>; - }; - - MAL: mcmal { - compatible = "ibm,mcmal-405gp", "ibm,mcmal"; - dcr-reg = <0x180 0x062>; - num-tx-chans = <1>; - num-rx-chans = <1>; - interrupt-parent = <&UIC0>; - interrupts = < - 0xb 0x4 /* TXEOB */ - 0xc 0x4 /* RXEOB */ - 0xa 0x4 /* SERR */ - 0xd 0x4 /* TXDE */ - 0xe 0x4 /* RXDE */>; - }; - - POB0: opb { - compatible = "ibm,opb-405gp", "ibm,opb"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0xef600000 0xef600000 0x00a00000>; - dcr-reg = <0x0a0 0x005>; - clock-frequency = <0>; /* Filled in by zImage */ - - UART0: serial@ef600300 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600300 0x00000008>; - virtual-reg = <0xef600300>; - clock-frequency = <0>; /* Filled in by zImage */ - current-speed = <9600>; - interrupt-parent = <&UIC0>; - interrupts = <0x0 0x4>; - }; - - UART1: serial@ef600400 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600400 0x00000008>; - virtual-reg = <0xef600400>; - clock-frequency = <0>; /* Filled in by zImage */ - current-speed = <9600>; - interrupt-parent = <&UIC0>; - interrupts = <0x1 0x4>; - }; - - IIC: i2c@ef600500 { - compatible = "ibm,iic-405gp", "ibm,iic"; - reg = <0xef600500 0x00000011>; - interrupt-parent = <&UIC0>; - interrupts = <0x2 0x4>; - }; - - GPIO: gpio@ef600700 { - compatible = "ibm,gpio-405gp"; - reg = <0xef600700 0x00000020>; - }; - - EMAC: ethernet@ef600800 { - device_type = "network"; - compatible = "ibm,emac-405gp", "ibm,emac"; - interrupt-parent = <&UIC0>; - interrupts = < - 0xf 0x4 /* Ethernet */ - 0x9 0x4 /* Ethernet Wake Up */>; - local-mac-address = [000000000000]; /* Filled in by zImage */ - reg = <0xef600800 0x00000070>; - mal-device = <&MAL>; - mal-tx-channel = <0>; - mal-rx-channel = <0>; - cell-index = <0>; - max-frame-size = <1500>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - phy-mode = "rmii"; - phy-map = <0x00000001>; - }; - - }; - - EBC0: ebc { - compatible = "ibm,ebc-405gp", "ibm,ebc"; - dcr-reg = <0x012 0x002>; - #address-cells = <2>; - #size-cells = <1>; - /* The ranges property is supplied by the bootwrapper - * and is based on the firmware's configuration of the - * EBC bridge - */ - clock-frequency = <0>; /* Filled in by zImage */ - - sram@0,0 { - reg = <0x00000000 0x00000000 0x00080000>; - }; - - flash@0,80000 { - compatible = "jedec-flash"; - bank-width = <1>; - reg = <0x00000000 
0x00080000 0x00080000>; - #address-cells = <1>; - #size-cells = <1>; - partition@0 { - label = "OpenBIOS"; - reg = <0x00000000 0x00080000>; - read-only; - }; - }; - - nvram@1,0 { - /* NVRAM and RTC */ - compatible = "ds1743-nvram"; - #bytes = <0x2000>; - reg = <0x00000001 0x00000000 0x00002000>; - }; - - keyboard@2,0 { - compatible = "intel,82C42PC"; - reg = <0x00000002 0x00000000 0x00000002>; - }; - - ir@3,0 { - compatible = "ti,TIR2000PAG"; - reg = <0x00000003 0x00000000 0x00000010>; - }; - - fpga@7,0 { - compatible = "Walnut-FPGA"; - reg = <0x00000007 0x00000000 0x00000010>; - virtual-reg = <0xf0300005>; - }; - }; - - PCI0: pci@ec000000 { - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - compatible = "ibm,plb405gp-pci", "ibm,plb-pci"; - primary; - reg = <0xeec00000 0x00000008 /* Config space access */ - 0xeed80000 0x00000004 /* IACK */ - 0xeed80000 0x00000004 /* Special cycle */ - 0xef480000 0x00000040>; /* Internal registers */ - - /* Outbound ranges, one memory and one IO, - * later cannot be changed. Chip supports a second - * IO range but we don't use it for now - */ - ranges = <0x02000000 0x00000000 0x80000000 0x80000000 0x00000000 0x20000000 - 0x01000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>; - - /* Inbound 2GB range starting at 0 */ - dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x80000000>; - - /* Walnut has all 4 IRQ pins tied together per slot */ - interrupt-map-mask = <0xf800 0x0 0x0 0x0>; - interrupt-map = < - /* IDSEL 1 */ - 0x800 0x0 0x0 0x0 &UIC0 0x1c 0x8 - - /* IDSEL 2 */ - 0x1000 0x0 0x0 0x0 &UIC0 0x1d 0x8 - - /* IDSEL 3 */ - 0x1800 0x0 0x0 0x0 &UIC0 0x1e 0x8 - - /* IDSEL 4 */ - 0x2000 0x0 0x0 0x0 &UIC0 0x1f 0x8 - >; - }; - }; - - chosen { - stdout-path = "/plb/opb/serial@ef600300"; - }; -}; diff --git a/arch/powerpc/boot/treeboot-walnut.c b/arch/powerpc/boot/treeboot-walnut.c deleted file mode 100644 index 623f58e7f7c9..000000000000 --- a/arch/powerpc/boot/treeboot-walnut.c +++ /dev/null @@ -1,81 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Old U-boot compatibility for Walnut - * - * Author: Josh Boyer - * - * Copyright 2007 IBM Corporation - * Based on cuboot-83xx.c, which is: - * Copyright (c) 2007 Freescale Semiconductor, Inc. 
- */ - -#include "ops.h" -#include "stdio.h" -#include "dcr.h" -#include "4xx.h" -#include "io.h" - -BSS_STACK(4096); - -static void walnut_flashsel_fixup(void) -{ - void *devp, *sram; - u32 reg_flash[3] = {0x0, 0x0, 0x80000}; - u32 reg_sram[3] = {0x0, 0x0, 0x80000}; - u8 *fpga; - u8 fpga_brds1 = 0x0; - - devp = finddevice("/plb/ebc/fpga"); - if (!devp) - fatal("Couldn't locate FPGA node\n\r"); - - if (getprop(devp, "virtual-reg", &fpga, sizeof(fpga)) != sizeof(fpga)) - fatal("no virtual-reg property\n\r"); - - fpga_brds1 = in_8(fpga); - - devp = finddevice("/plb/ebc/flash"); - if (!devp) - fatal("Couldn't locate flash node\n\r"); - - if (getprop(devp, "reg", reg_flash, sizeof(reg_flash)) != sizeof(reg_flash)) - fatal("flash reg property has unexpected size\n\r"); - - sram = finddevice("/plb/ebc/sram"); - if (!sram) - fatal("Couldn't locate sram node\n\r"); - - if (getprop(sram, "reg", reg_sram, sizeof(reg_sram)) != sizeof(reg_sram)) - fatal("sram reg property has unexpected size\n\r"); - - if (fpga_brds1 & 0x1) { - reg_flash[1] ^= 0x80000; - reg_sram[1] ^= 0x80000; - } - - setprop(devp, "reg", reg_flash, sizeof(reg_flash)); - setprop(sram, "reg", reg_sram, sizeof(reg_sram)); -} - -#define WALNUT_OPENBIOS_MAC_OFF 0xfffffe0b -static void walnut_fixups(void) -{ - ibm4xx_sdram_fixup_memsize(); - ibm405gp_fixup_clocks(33330000, 0xa8c000); - ibm4xx_quiesce_eth((u32 *)0xef600800, NULL); - ibm4xx_fixup_ebc_ranges("/plb/ebc"); - walnut_flashsel_fixup(); - dt_fixup_mac_address_by_alias("ethernet0", (u8 *) WALNUT_OPENBIOS_MAC_OFF); -} - -void platform_init(void) -{ - unsigned long end_of_ram = 0x2000000; - unsigned long avail_ram = end_of_ram - (unsigned long) _end; - - simple_alloc_init(_end, avail_ram, 32, 32); - platform_ops.fixups = walnut_fixups; - platform_ops.exit = ibm40x_dbcr_reset; - fdt_init(_dtb_start); - serial_console_init(); -} diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig index db93c117be36..25eed86ec528 100644 --- a/arch/powerpc/configs/40x/acadia_defconfig +++ b/arch/powerpc/configs/40x/acadia_defconfig @@ -9,7 +9,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_ACADIA=y -# CONFIG_WALNUT is not set CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig index edc22464dfb5..3549c9e950e8 100644 --- a/arch/powerpc/configs/40x/kilauea_defconfig +++ b/arch/powerpc/configs/40x/kilauea_defconfig @@ -11,7 +11,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_KILAUEA=y -# CONFIG_WALNUT is not set CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig index 579fa846839c..6a735ee75715 100644 --- a/arch/powerpc/configs/40x/klondike_defconfig +++ b/arch/powerpc/configs/40x/klondike_defconfig @@ -8,7 +8,6 @@ CONFIG_EMBEDDED=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -# CONFIG_WALNUT is not set CONFIG_APM8018X=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_MATH_EMULATION=y diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig index 188789b9aa4c..4563f88acf0c 100644 --- a/arch/powerpc/configs/40x/makalu_defconfig +++ b/arch/powerpc/configs/40x/makalu_defconfig @@ -9,7 +9,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_MAKALU=y -# CONFIG_WALNUT is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y 
diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig index 5bf6af7ef093..2a2bb3f46847 100644 --- a/arch/powerpc/configs/40x/obs600_defconfig +++ b/arch/powerpc/configs/40x/obs600_defconfig @@ -10,7 +10,6 @@ CONFIG_KALLSYMS_ALL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set -# CONFIG_WALNUT is not set CONFIG_OBS600=y CONFIG_MATH_EMULATION=y CONFIG_NET=y diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index ca8f44650647..7c25be6c21d9 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -45,16 +45,6 @@ config MAKALU help This option enables support for the AMCC PPC405EX board. -config WALNUT - bool "Walnut" - depends on 40x - default y - select 405GP - select FORCE_PCI - select OF_RTC - help - This option enables support for the IBM PPC405GP evaluation board. - config OBS600 bool "OpenBlockS 600" depends on 40x diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile index e9386deed505..812f1a5736fb 100644 --- a/arch/powerpc/platforms/40x/Makefile +++ b/arch/powerpc/platforms/40x/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_WALNUT) += walnut.o obj-$(CONFIG_EP405) += ep405.o obj-$(CONFIG_PPC40x_SIMPLE) += ppc40x_simple.o diff --git a/arch/powerpc/platforms/40x/walnut.c b/arch/powerpc/platforms/40x/walnut.c deleted file mode 100644 index e5797815e2f1..000000000000 --- a/arch/powerpc/platforms/40x/walnut.c +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Architecture- / platform-specific boot-time initialization code for - * IBM PowerPC 4xx based boards. Adapted from original - * code by Gary Thomas, Cort Dougan , and Dan Malek - * . - * - * Copyright(c) 1999-2000 Grant Erickson - * - * Rewritten and ported to the merged powerpc tree: - * Copyright 2007 IBM Corporation - * Josh Boyer - * - * 2002 (c) MontaVista, Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. - */ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -static const struct of_device_id walnut_of_bus[] __initconst = { - { .compatible = "ibm,plb3", }, - { .compatible = "ibm,opb", }, - { .compatible = "ibm,ebc", }, - {}, -}; - -static int __init walnut_device_probe(void) -{ - of_platform_bus_probe(NULL, walnut_of_bus, NULL); - of_instantiate_rtc(); - - return 0; -} -machine_device_initcall(walnut, walnut_device_probe); - -static int __init walnut_probe(void) -{ - if (!of_machine_is_compatible("ibm,walnut")) - return 0; - - pci_set_flags(PCI_REASSIGN_ALL_RSRC); - - return 1; -} - -define_machine(walnut) { - .name = "Walnut", - .probe = walnut_probe, - .progress = udbg_progress, - .init_IRQ = uic_init_tree, - .get_irq = uic_get_irq, - .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, -}; -- cgit v1.2.3-59-g8ed1b From 548f5244f1064c9facb19c5e97c21e1e80102ea0 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 May 2020 16:55:58 +0000 Subject: powerpc/40x: Remove EP405 EP405 is an old type of board based on a 405GP which is obsolete. Remove it. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/e9534caa51f327c841b3db5f48043a47ad70d246.1590079968.git.christophe.leroy@csgroup.eu --- arch/powerpc/boot/Makefile | 3 +- arch/powerpc/boot/dts/ep405.dts | 230 ------------------------------- arch/powerpc/boot/ep405.c | 71 ---------- arch/powerpc/configs/40x/ep405_defconfig | 62 --------- arch/powerpc/configs/ppc40x_defconfig | 1 - arch/powerpc/platforms/40x/Kconfig | 8 -- arch/powerpc/platforms/40x/Makefile | 1 - arch/powerpc/platforms/40x/ep405.c | 123 ----------------- 8 files changed, 1 insertion(+), 498 deletions(-) delete mode 100644 arch/powerpc/boot/dts/ep405.dts delete mode 100644 arch/powerpc/boot/ep405.c delete mode 100644 arch/powerpc/configs/40x/ep405_defconfig delete mode 100644 arch/powerpc/platforms/40x/ep405.c diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 749c27fcf2d9..63d7456b9518 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -130,7 +130,7 @@ src-wlib-$(CONFIG_EMBEDDED6xx) += ugecon.c fsl-soc.c src-wlib-$(CONFIG_CPM) += cpm-serial.c src-plat-y := of.c epapr.c -src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \ +src-plat-$(CONFIG_40x) += fixed-head.S cuboot-hotfoot.c \ cuboot-acadia.c \ cuboot-kilauea.c simpleboot.c src-plat-$(CONFIG_44x) += treeboot-ebony.c cuboot-ebony.c treeboot-bamboo.c \ @@ -275,7 +275,6 @@ image-$(CONFIG_EPAPR_BOOT) += zImage.epapr # # Board ports in arch/powerpc/platform/40x/Kconfig -image-$(CONFIG_EP405) += dtbImage.ep405 image-$(CONFIG_HOTFOOT) += cuImage.hotfoot image-$(CONFIG_ACADIA) += cuImage.acadia image-$(CONFIG_OBS600) += uImage.obs600 diff --git a/arch/powerpc/boot/dts/ep405.dts b/arch/powerpc/boot/dts/ep405.dts deleted file mode 100644 index 4ac9c5ab6e6b..000000000000 --- a/arch/powerpc/boot/dts/ep405.dts +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Device Tree Source for EP405 - * - * Copyright 2007 IBM Corp. - * Benjamin Herrenschmidt - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without - * any warranty of any kind, whether express or implied. 
- */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - model = "ep405"; - compatible = "ep405"; - dcr-parent = <&{/cpus/cpu@0}>; - - aliases { - ethernet0 = &EMAC; - serial0 = &UART0; - serial1 = &UART1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - model = "PowerPC,405GP"; - reg = <0x00000000>; - clock-frequency = <200000000>; /* Filled in by zImage */ - timebase-frequency = <0>; /* Filled in by zImage */ - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <16384>; - d-cache-size = <16384>; - dcr-controller; - dcr-access-method = "native"; - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x00000000>; /* Filled in by zImage */ - }; - - UIC0: interrupt-controller { - compatible = "ibm,uic"; - interrupt-controller; - cell-index = <0>; - dcr-reg = <0x0c0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - }; - - plb { - compatible = "ibm,plb3"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - clock-frequency = <0>; /* Filled in by zImage */ - - SDRAM0: memory-controller { - compatible = "ibm,sdram-405gp"; - dcr-reg = <0x010 0x002>; - }; - - MAL: mcmal { - compatible = "ibm,mcmal-405gp", "ibm,mcmal"; - dcr-reg = <0x180 0x062>; - num-tx-chans = <1>; - num-rx-chans = <1>; - interrupt-parent = <&UIC0>; - interrupts = < - 0xb 0x4 /* TXEOB */ - 0xc 0x4 /* RXEOB */ - 0xa 0x4 /* SERR */ - 0xd 0x4 /* TXDE */ - 0xe 0x4 /* RXDE */>; - }; - - POB0: opb { - compatible = "ibm,opb-405gp", "ibm,opb"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0xef600000 0xef600000 0x00a00000>; - dcr-reg = <0x0a0 0x005>; - clock-frequency = <0>; /* Filled in by zImage */ - - UART0: serial@ef600300 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600300 0x00000008>; - virtual-reg = <0xef600300>; - clock-frequency = <0>; /* Filled in by zImage */ - current-speed = <9600>; - interrupt-parent = <&UIC0>; - interrupts = <0x0 0x4>; - }; - - UART1: serial@ef600400 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600400 0x00000008>; - virtual-reg = <0xef600400>; - clock-frequency = <0>; /* Filled in by zImage */ - current-speed = <9600>; - interrupt-parent = <&UIC0>; - interrupts = <0x1 0x4>; - }; - - IIC: i2c@ef600500 { - compatible = "ibm,iic-405gp", "ibm,iic"; - reg = <0xef600500 0x00000011>; - interrupt-parent = <&UIC0>; - interrupts = <0x2 0x4>; - }; - - GPIO: gpio@ef600700 { - compatible = "ibm,gpio-405gp"; - reg = <0xef600700 0x00000020>; - }; - - EMAC: ethernet@ef600800 { - linux,network-index = <0x0>; - device_type = "network"; - compatible = "ibm,emac-405gp", "ibm,emac"; - interrupt-parent = <&UIC0>; - interrupts = < - 0xf 0x4 /* Ethernet */ - 0x9 0x4 /* Ethernet Wake Up */>; - local-mac-address = [000000000000]; /* Filled in by zImage */ - reg = <0xef600800 0x00000070>; - mal-device = <&MAL>; - mal-tx-channel = <0>; - mal-rx-channel = <0>; - cell-index = <0>; - max-frame-size = <1500>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - phy-mode = "rmii"; - phy-map = <0x00000000>; - }; - - }; - - EBC0: ebc { - compatible = "ibm,ebc-405gp", "ibm,ebc"; - dcr-reg = <0x012 0x002>; - #address-cells = <2>; - #size-cells = <1>; - - - /* The ranges property is supplied by the bootwrapper - * and is based on the firmware's configuration of the - * EBC bridge - */ - clock-frequency = <0>; /* Filled in by zImage */ - - /* NVRAM and RTC */ - nvrtc@4,200000 { - compatible = "ds1742"; - reg = <0x00000004 0x00200000 0x00000000>; /* size fixed up 
by zImage */ - }; - - /* "BCSR" CPLD contains a PCI irq controller */ - bcsr@4,0 { - compatible = "ep405-bcsr"; - reg = <0x00000004 0x00000000 0x00000010>; - interrupt-controller; - /* Routing table */ - irq-routing = [ 00 /* SYSERR */ - 01 /* STTM */ - 01 /* RTC */ - 01 /* FENET */ - 02 /* NB PCIIRQ mux ? */ - 03 /* SB Winbond 8259 ? */ - 04 /* Serial Ring */ - 05 /* USB (ep405pc) */ - 06 /* XIRQ 0 */ - 06 /* XIRQ 1 */ - 06 /* XIRQ 2 */ - 06 /* XIRQ 3 */ - 06 /* XIRQ 4 */ - 06 /* XIRQ 5 */ - 06 /* XIRQ 6 */ - 07]; /* Reserved */ - }; - }; - - PCI0: pci@ec000000 { - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - compatible = "ibm,plb405gp-pci", "ibm,plb-pci"; - primary; - reg = <0xeec00000 0x00000008 /* Config space access */ - 0xeed80000 0x00000004 /* IACK */ - 0xeed80000 0x00000004 /* Special cycle */ - 0xef480000 0x00000040>; /* Internal registers */ - - /* Outbound ranges, one memory and one IO, - * later cannot be changed. Chip supports a second - * IO range but we don't use it for now - */ - ranges = <0x02000000 0x00000000 0x80000000 0x80000000 0x00000000 0x20000000 - 0x01000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>; - - /* Inbound 2GB range starting at 0 */ - dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x80000000>; - - /* That's all I know about IRQs on that thing ... */ - interrupt-map-mask = <0xf800 0x0 0x0 0x0>; - interrupt-map = < - /* USB */ - 0x7000 0x0 0x0 0x0 &UIC0 0x1e 0x8 /* IRQ5 */ - >; - }; - }; - - chosen { - stdout-path = "/plb/opb/serial@ef600300"; - }; -}; diff --git a/arch/powerpc/boot/ep405.c b/arch/powerpc/boot/ep405.c deleted file mode 100644 index f9ad1e6a844e..000000000000 --- a/arch/powerpc/boot/ep405.c +++ /dev/null @@ -1,71 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Embedded Planet EP405 with PlanetCore firmware - * - * (c) Benjamin Herrenschmidt , IBM Corp,\ - * - * Based on ep88xc.c by - * - * Scott Wood - * - * Copyright (c) 2007 Freescale Semiconductor, Inc. 
- */ - -#include "ops.h" -#include "stdio.h" -#include "planetcore.h" -#include "dcr.h" -#include "4xx.h" -#include "io.h" - -static char *table; -static u64 mem_size; - -static void platform_fixups(void) -{ - u64 val; - void *nvrtc; - - dt_fixup_memory(0, mem_size); - planetcore_set_mac_addrs(table); - - if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) { - printf("No PlanetCore crystal frequency key.\r\n"); - return; - } - ibm405gp_fixup_clocks(val, 0xa8c000); - ibm4xx_quiesce_eth((u32 *)0xef600800, NULL); - ibm4xx_fixup_ebc_ranges("/plb/ebc"); - - if (!planetcore_get_decimal(table, PLANETCORE_KEY_KB_NVRAM, &val)) { - printf("No PlanetCore NVRAM size key.\r\n"); - return; - } - nvrtc = finddevice("/plb/ebc/nvrtc@4,200000"); - if (nvrtc != NULL) { - u32 reg[3] = { 4, 0x200000, 0}; - getprop(nvrtc, "reg", reg, 3); - reg[2] = (val << 10) & 0xffffffff; - setprop(nvrtc, "reg", reg, 3); - } -} - -void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7) -{ - table = (char *)r3; - planetcore_prepare_table(table); - - if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size)) - return; - - mem_size *= 1024 * 1024; - simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64); - - fdt_init(_dtb_start); - - planetcore_set_stdout_path(table); - - serial_console_init(); - platform_ops.fixups = platform_fixups; -} diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig deleted file mode 100644 index a3854cf65f8d..000000000000 --- a/arch/powerpc/configs/40x/ep405_defconfig +++ /dev/null @@ -1,62 +0,0 @@ -CONFIG_40x=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_EP405=y -# CONFIG_WALNUT is not set -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -# CONFIG_IPV6 is not set -CONFIG_CONNECTOR=y -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=m -CONFIG_MTD_CFI=y -CONFIG_MTD_JEDECPROBE=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_NETDEVICES=y -CONFIG_IBM_EMAC=y -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_OF_PLATFORM=y -# CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set -CONFIG_THERMAL=y -CONFIG_USB=y -CONFIG_USB_MON=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PPC_OF_BE=y -CONFIG_USB_OHCI_HCD_PPC_OF_LE=y -CONFIG_EXT2_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_DEBUG_FS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_DES=y diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig index 88960a72b525..25f6c91e843a 100644 --- a/arch/powerpc/configs/ppc40x_defconfig +++ b/arch/powerpc/configs/ppc40x_defconfig @@ -10,7 +10,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PPC4xx_GPIO=y CONFIG_ACADIA=y -CONFIG_EP405=y CONFIG_HOTFOOT=y CONFIG_KILAUEA=y CONFIG_MAKALU=y diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index 7c25be6c21d9..5d9d96e7223a 100644 --- 
a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -7,14 +7,6 @@ config ACADIA help This option enables support for the AMCC 405EZ Acadia evaluation board. -config EP405 - bool "EP405/EP405PC" - depends on 40x - select 405GP - select FORCE_PCI - help - This option enables support for the EP405/EP405PC boards. - config HOTFOOT bool "Hotfoot" depends on 40x diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile index 812f1a5736fb..122de98527c4 100644 --- a/arch/powerpc/platforms/40x/Makefile +++ b/arch/powerpc/platforms/40x/Makefile @@ -1,3 +1,2 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_EP405) += ep405.o obj-$(CONFIG_PPC40x_SIMPLE) += ppc40x_simple.o diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c deleted file mode 100644 index 1c8aec6e9bb7..000000000000 --- a/arch/powerpc/platforms/40x/ep405.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Architecture- / platform-specific boot-time initialization code for - * IBM PowerPC 4xx based boards. Adapted from original - * code by Gary Thomas, Cort Dougan , and Dan Malek - * . - * - * Copyright(c) 1999-2000 Grant Erickson - * - * Rewritten and ported to the merged powerpc tree: - * Copyright 2007 IBM Corporation - * Josh Boyer - * - * Adapted to EP405 by Ben. Herrenschmidt - * - * TODO: Wire up the PCI IRQ mux and the southbridge interrupts - * - * 2002 (c) MontaVista, Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. - */ - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -static struct device_node *bcsr_node; -static void __iomem *bcsr_regs; - -/* BCSR registers */ -#define BCSR_ID 0 -#define BCSR_PCI_CTRL 1 -#define BCSR_FLASH_NV_POR_CTRL 2 -#define BCSR_FENET_UART_CTRL 3 -#define BCSR_PCI_IRQ 4 -#define BCSR_XIRQ_SELECT 5 -#define BCSR_XIRQ_ROUTING 6 -#define BCSR_XIRQ_STATUS 7 -#define BCSR_XIRQ_STATUS2 8 -#define BCSR_SW_STAT_LED_CTRL 9 -#define BCSR_GPIO_IRQ_PAR_CTRL 10 -/* there's more, can't be bothered typing them tho */ - - -static const struct of_device_id ep405_of_bus[] __initconst = { - { .compatible = "ibm,plb3", }, - { .compatible = "ibm,opb", }, - { .compatible = "ibm,ebc", }, - {}, -}; - -static int __init ep405_device_probe(void) -{ - of_platform_bus_probe(NULL, ep405_of_bus, NULL); - - return 0; -} -machine_device_initcall(ep405, ep405_device_probe); - -static void __init ep405_init_bcsr(void) -{ - const u8 *irq_routing; - int i; - - /* Find the bloody thing & map it */ - bcsr_node = of_find_compatible_node(NULL, NULL, "ep405-bcsr"); - if (bcsr_node == NULL) { - printk(KERN_ERR "EP405 BCSR not found !\n"); - return; - } - bcsr_regs = of_iomap(bcsr_node, 0); - if (bcsr_regs == NULL) { - printk(KERN_ERR "EP405 BCSR failed to map !\n"); - return; - } - - /* Get the irq-routing property and apply the routing to the CPLD */ - irq_routing = of_get_property(bcsr_node, "irq-routing", NULL); - if (irq_routing == NULL) - return; - for (i = 0; i < 16; i++) { - u8 irq = irq_routing[i]; - out_8(bcsr_regs + BCSR_XIRQ_SELECT, i); - out_8(bcsr_regs + BCSR_XIRQ_ROUTING, irq); - } - in_8(bcsr_regs + BCSR_XIRQ_SELECT); - mb(); - out_8(bcsr_regs + BCSR_GPIO_IRQ_PAR_CTRL, 0xfe); -} - -static void __init ep405_setup_arch(void) -{ - /* Find & init the BCSR CPLD */ - ep405_init_bcsr(); - - pci_set_flags(PCI_REASSIGN_ALL_RSRC); -} - -static int __init 
ep405_probe(void) -{ - if (!of_machine_is_compatible("ep405")) - return 0; - - return 1; -} - -define_machine(ep405) { - .name = "EP405", - .probe = ep405_probe, - .setup_arch = ep405_setup_arch, - .progress = udbg_progress, - .init_IRQ = uic_init_tree, - .get_irq = uic_get_irq, - .restart = ppc4xx_reset_system, - .calibrate_decr = generic_calibrate_decr, -}; -- cgit v1.2.3-59-g8ed1b From 2874ec75708eed59a47a9a986c02add747ae6e9b Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 May 2020 16:55:59 +0000 Subject: powerpc/40x: Remove support for ISS Simulator ISS4xx has support for 405GP which is obsolete. Remove it. Signed-off-by: Christophe Leroy Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/7380974bf5952af825ae2552d0a987c0c1c8b506.1590079969.git.christophe.leroy@csgroup.eu --- arch/powerpc/platforms/44x/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 39e93d23fb38..78ac6d67a935 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -167,8 +167,7 @@ config YOSEMITE config ISS4xx bool "ISS 4xx Simulator" - depends on (44x || 40x) - select 405GP if 40x + depends on 44x select 440GP if 44x && !PPC_47x select PPC_FPU select OF_RTC -- cgit v1.2.3-59-g8ed1b From 7d372d4ccdd55d5ead4d4ecbc336af4dd7d04344 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 May 2020 16:56:00 +0000 Subject: powerpc/40x: Remove support for IBM 405GP All platforms selecting the obsolete processor are gone now. Remove support for it. Signed-off-by: Christophe Leroy Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/906c6a6df710f2826e332b8a0cd5d2859a913a1c.1590079969.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/cputable.c | 13 ------------- arch/powerpc/platforms/40x/Kconfig | 6 ------ 2 files changed, 19 deletions(-) diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index bdc4eab0daaf..8ed553734919 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1232,19 +1232,6 @@ static struct cpu_spec __initdata cpu_specs[] = { }, #endif /* CONFIG_PPC_8xx */ #ifdef CONFIG_40x - { /* 405GP */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x40110000, - .cpu_name = "405GP", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, { /* STB 04xxx */ .pvr_mask = 0xffff0000, .pvr_value = 0x41810000, diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index 5d9d96e7223a..253c047fe6fe 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -51,12 +51,6 @@ config PPC40x_SIMPLE help This option enables the simple PowerPC 40x platform support. -config 405GP - bool - select IBM405_ERR77 - select IBM405_ERR51 - select IBM_EMAC_ZMII if IBM_EMAC - config 405EX bool select IBM_EMAC_EMAC4 if IBM_EMAC -- cgit v1.2.3-59-g8ed1b From 59fb463b48e904dfdfff64c7dd4d67f20ae27170 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 May 2020 16:56:01 +0000 Subject: powerpc/40x: Remove IBM405 Erratum #51 This erratum was for IBM 403GCX, 405EP and STB03xxx which are now gone. Remove this erratum.
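For background on what these errata symbols actually did: each one gated a small fixup that was pasted into inline assembly through helper macros (see the asm-405.h removal in the Erratum #77 patch further below). The following is a hedged, simplified sketch of that pattern, not the kernel's exact definition — the real macro was built with stringify_in_c() — showing how a symbol such as CONFIG_IBM405_ERR77 injected a dcbt before every stwcx.:

#ifdef CONFIG_IBM405_ERR77
/* Erratum #77: a dcbt (or sync) is required before every stwcx. */
#define PPC405_ERR77(ra, rb)    "    dcbt    " #ra ", " #rb "\n"
#else
/* Fixed cores: the workaround compiles away entirely */
#define PPC405_ERR77(ra, rb)    ""
#endif

static inline void atomic_inc_sketch(int *v)
{
    int t;

    asm volatile(
"1:    lwarx    %0,0,%2\n"    /* load-reserve the counter */
"    addic    %0,%0,1\n"      /* increment it */
    PPC405_ERR77(0, %2)       /* erratum fixup, if configured */
"    stwcx.    %0,0,%2\n"     /* store-conditional */
"    bne-    1b"              /* reservation lost: retry */
    : "=&r" (t), "+m" (*v)
    : "r" (v)
    : "cc", "xer");
}

With every 405GP/STB03xxx user gone, these macros can only ever expand to nothing, which is why this patch and the next can delete both the config symbols and all of their call sites.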
Signed-off-by: Christophe Leroy Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1b6c9916514ef3e084bba57925ad9eb444627566.1590079969.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/head_40x.S | 6 ------ arch/powerpc/platforms/40x/Kconfig | 4 ---- 2 files changed, 10 deletions(-) diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 5fe4b7ad864b..a78cacea0be0 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -426,13 +426,7 @@ _ENTRY(saved_ksp_limit) EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_STD) EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD) EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_STD) -#ifdef CONFIG_IBM405_ERR51 - /* 405GP errata 51 */ - START_EXCEPTION(0x1700, Trap_17) - b DTLBMiss -#else EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_STD) -#endif EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD) EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD) EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_STD) diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index 253c047fe6fe..ebe283476461 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -75,10 +75,6 @@ config PPC4xx_GPIO config IBM405_ERR77 bool -# All 40x-based cores, up until the 405GPR and 405EP have this errata. -config IBM405_ERR51 - bool - config APM8018X bool "APM8018X" depends on 40x -- cgit v1.2.3-59-g8ed1b From 455531e9d88048c025ff9099796413df748d92b9 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 May 2020 16:56:02 +0000 Subject: powerpc: Remove IBM405 Erratum #77 This erratum is dedicated to IBM 405GP and STB03xxx which are now gone. Remove this erratum. Signed-off-by: Christophe Leroy Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/44dbc08e9034681eb28324cbabc086e97044c36c.1590079969.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/asm-405.h | 19 ------------------- arch/powerpc/include/asm/atomic.h | 11 ----------- arch/powerpc/include/asm/bitops.h | 4 ---- arch/powerpc/include/asm/cmpxchg.h | 11 ----------- arch/powerpc/include/asm/futex.h | 3 --- arch/powerpc/include/asm/nohash/32/pgtable.h | 1 - arch/powerpc/include/asm/spinlock.h | 4 ---- arch/powerpc/kernel/entry_32.S | 11 ----------- arch/powerpc/kernel/head_40x.S | 3 --- arch/powerpc/platforms/40x/Kconfig | 6 ------ 10 files changed, 73 deletions(-) delete mode 100644 arch/powerpc/include/asm/asm-405.h diff --git a/arch/powerpc/include/asm/asm-405.h b/arch/powerpc/include/asm/asm-405.h deleted file mode 100644 index 7270d3ae7c8e..000000000000 --- a/arch/powerpc/include/asm/asm-405.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _ASM_POWERPC_ASM_405_H -#define _ASM_POWERPC_ASM_405_H - -#include - -#ifdef __KERNEL__ -#ifdef CONFIG_IBM405_ERR77 -/* Erratum #77 on the 405 means we need a sync or dcbt before every - * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this. 
- */ -#define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;) -#define PPC405_ERR77_SYNC stringify_in_c(sync;) -#else -#define PPC405_ERR77(ra,rb) -#define PPC405_ERR77_SYNC -#endif -#endif - -#endif /* _ASM_POWERPC_ASM_405_H */ diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 31c231ea56b7..498785ffc25f 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -10,7 +10,6 @@ #include #include #include -#include #define ATOMIC_INIT(i) { (i) } @@ -47,7 +46,6 @@ static __inline__ void atomic_##op(int a, atomic_t *v) \ __asm__ __volatile__( \ "1: lwarx %0,0,%3 # atomic_" #op "\n" \ #asm_op " %0,%2,%0\n" \ - PPC405_ERR77(0,%3) \ " stwcx. %0,0,%3 \n" \ " bne- 1b\n" \ : "=&r" (t), "+m" (v->counter) \ @@ -63,7 +61,6 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \ __asm__ __volatile__( \ "1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \ #asm_op " %0,%2,%0\n" \ - PPC405_ERR77(0, %3) \ " stwcx. %0,0,%3\n" \ " bne- 1b\n" \ : "=&r" (t), "+m" (v->counter) \ @@ -81,7 +78,6 @@ static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \ __asm__ __volatile__( \ "1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \ #asm_op " %1,%3,%0\n" \ - PPC405_ERR77(0, %4) \ " stwcx. %1,0,%4\n" \ " bne- 1b\n" \ : "=&r" (res), "=&r" (t), "+m" (v->counter) \ @@ -130,7 +126,6 @@ static __inline__ void atomic_inc(atomic_t *v) __asm__ __volatile__( "1: lwarx %0,0,%2 # atomic_inc\n\ addic %0,%0,1\n" - PPC405_ERR77(0,%2) " stwcx. %0,0,%2 \n\ bne- 1b" : "=&r" (t), "+m" (v->counter) @@ -146,7 +141,6 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v) __asm__ __volatile__( "1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n" " addic %0,%0,1\n" - PPC405_ERR77(0, %2) " stwcx. %0,0,%2\n" " bne- 1b" : "=&r" (t), "+m" (v->counter) @@ -163,7 +157,6 @@ static __inline__ void atomic_dec(atomic_t *v) __asm__ __volatile__( "1: lwarx %0,0,%2 # atomic_dec\n\ addic %0,%0,-1\n" - PPC405_ERR77(0,%2)\ " stwcx. %0,0,%2\n\ bne- 1b" : "=&r" (t), "+m" (v->counter) @@ -179,7 +172,6 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v) __asm__ __volatile__( "1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n" " addic %0,%0,-1\n" - PPC405_ERR77(0, %2) " stwcx. %0,0,%2\n" " bne- 1b" : "=&r" (t), "+m" (v->counter) @@ -220,7 +212,6 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u) cmpw 0,%0,%3 \n\ beq 2f \n\ add %0,%2,%0 \n" - PPC405_ERR77(0,%2) " stwcx. %0,0,%1 \n\ bne- 1b \n" PPC_ATOMIC_EXIT_BARRIER @@ -251,7 +242,6 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v) cmpwi 0,%0,0\n\ beq- 2f\n\ addic %1,%0,1\n" - PPC405_ERR77(0,%2) " stwcx. %1,0,%2\n\ bne- 1b\n" PPC_ATOMIC_EXIT_BARRIER @@ -280,7 +270,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v) cmpwi %0,1\n\ addi %0,%0,-1\n\ blt- 2f\n" - PPC405_ERR77(0,%1) " stwcx. 
%0,0,%1\n\ bne- 1b" PPC_ATOMIC_EXIT_BARRIER diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 28dcf8222943..4a4d3afd5340 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -41,7 +41,6 @@ #include #include #include -#include /* PPC bit number conversion */ #define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be)) @@ -73,7 +72,6 @@ static inline void fn(unsigned long mask, \ prefix \ "1:" PPC_LLARX(%0,0,%3,0) "\n" \ stringify_in_c(op) "%0,%0,%2\n" \ - PPC405_ERR77(0,%3) \ PPC_STLCX "%0,0,%3\n" \ "bne- 1b\n" \ : "=&r" (old), "+m" (*p) \ @@ -119,7 +117,6 @@ static inline unsigned long fn( \ prefix \ "1:" PPC_LLARX(%0,0,%3,eh) "\n" \ stringify_in_c(op) "%1,%0,%2\n" \ - PPC405_ERR77(0,%3) \ PPC_STLCX "%1,0,%3\n" \ "bne- 1b\n" \ postfix \ @@ -175,7 +172,6 @@ clear_bit_unlock_return_word(int nr, volatile unsigned long *addr) PPC_RELEASE_BARRIER "1:" PPC_LLARX(%0,0,%3,0) "\n" "andc %1,%0,%2\n" - PPC405_ERR77(0,%3) PPC_STLCX "%1,0,%3\n" "bne- 1b\n" : "=&r" (old), "=&r" (t) diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h index 27183871eb3b..cf091c4c22e5 100644 --- a/arch/powerpc/include/asm/cmpxchg.h +++ b/arch/powerpc/include/asm/cmpxchg.h @@ -6,7 +6,6 @@ #include #include #include -#include #ifdef __BIG_ENDIAN #define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE) @@ -29,7 +28,6 @@ static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \ "1: lwarx %0,0,%3\n" \ " andc %1,%0,%5\n" \ " or %1,%1,%4\n" \ - PPC405_ERR77(0,%3) \ " stwcx. %1,0,%3\n" \ " bne- 1b\n" \ : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \ @@ -60,7 +58,6 @@ u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new) \ " bne- 2f\n" \ " andc %1,%0,%6\n" \ " or %1,%1,%5\n" \ - PPC405_ERR77(0,%3) \ " stwcx. %1,0,%3\n" \ " bne- 1b\n" \ br2 \ @@ -92,7 +89,6 @@ __xchg_u32_local(volatile void *p, unsigned long val) __asm__ __volatile__( "1: lwarx %0,0,%2 \n" - PPC405_ERR77(0,%2) " stwcx. %3,0,%2 \n\ bne- 1b" : "=&r" (prev), "+m" (*(volatile unsigned int *)p) @@ -109,7 +105,6 @@ __xchg_u32_relaxed(u32 *p, unsigned long val) __asm__ __volatile__( "1: lwarx %0,0,%2\n" - PPC405_ERR77(0, %2) " stwcx. %3,0,%2\n" " bne- 1b" : "=&r" (prev), "+m" (*p) @@ -127,7 +122,6 @@ __xchg_u64_local(volatile void *p, unsigned long val) __asm__ __volatile__( "1: ldarx %0,0,%2 \n" - PPC405_ERR77(0,%2) " stdcx. %3,0,%2 \n\ bne- 1b" : "=&r" (prev), "+m" (*(volatile unsigned long *)p) @@ -144,7 +138,6 @@ __xchg_u64_relaxed(u64 *p, unsigned long val) __asm__ __volatile__( "1: ldarx %0,0,%2\n" - PPC405_ERR77(0, %2) " stdcx. %3,0,%2\n" " bne- 1b" : "=&r" (prev), "+m" (*p) @@ -229,7 +222,6 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ cmpw 0,%0,%3\n\ bne- 2f\n" - PPC405_ERR77(0,%2) " stwcx. %4,0,%2\n\ bne- 1b" PPC_ATOMIC_EXIT_BARRIER @@ -252,7 +244,6 @@ __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old, "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ cmpw 0,%0,%3\n\ bne- 2f\n" - PPC405_ERR77(0,%2) " stwcx. %4,0,%2\n\ bne- 1b" "\n\ @@ -273,7 +264,6 @@ __cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new) "1: lwarx %0,0,%2 # __cmpxchg_u32_relaxed\n" " cmpw 0,%0,%3\n" " bne- 2f\n" - PPC405_ERR77(0, %2) " stwcx. %4,0,%2\n" " bne- 1b\n" "2:" @@ -301,7 +291,6 @@ __cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new) "1: lwarx %0,0,%2 # __cmpxchg_u32_acquire\n" " cmpw 0,%0,%3\n" " bne- 2f\n" - PPC405_ERR77(0, %2) " stwcx. 
%4,0,%2\n" " bne- 1b\n" PPC_ACQUIRE_BARRIER diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index f187bb5e524e..e93ee3202e4c 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h @@ -8,14 +8,12 @@ #include #include #include -#include #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ __asm__ __volatile ( \ PPC_ATOMIC_ENTRY_BARRIER \ "1: lwarx %0,0,%2\n" \ insn \ - PPC405_ERR77(0, %2) \ "2: stwcx. %1,0,%2\n" \ "bne- 1b\n" \ PPC_ATOMIC_EXIT_BARRIER \ @@ -82,7 +80,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ cmpw 0,%1,%4\n\ bne- 3f\n" - PPC405_ERR77(0,%3) "2: stwcx. %5,0,%3\n\ bne- 1b\n" PPC_ATOMIC_EXIT_BARRIER diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 46cd5428fc52..639f3b3713ec 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -9,7 +9,6 @@ #include #include #include /* For sub-arch specific PPC_PIN_SIZE */ -#include #ifdef CONFIG_44x extern int icache_44x_need_flush; diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 860228e917dc..2d620896cdae 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -23,7 +23,6 @@ #endif #include #include -#include #ifdef CONFIG_PPC64 /* use 0x800000yy when locked, where yy == CPU number */ @@ -210,7 +209,6 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw) __DO_SIGN_EXTEND " addic. %0,%0,1\n\ ble- 2f\n" - PPC405_ERR77(0,%1) " stwcx. %0,0,%1\n\ bne- 1b\n" PPC_ACQUIRE_BARRIER @@ -234,7 +232,6 @@ static inline long __arch_write_trylock(arch_rwlock_t *rw) "1: " PPC_LWARX(%0,0,%2,1) "\n\ cmpwi 0,%0,0\n\ bne- 2f\n" - PPC405_ERR77(0,%1) " stwcx. %1,0,%2\n\ bne- 1b\n" PPC_ACQUIRE_BARRIER @@ -292,7 +289,6 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) PPC_RELEASE_BARRIER "1: lwarx %0,0,%1\n\ addic %0,%0,-1\n" - PPC405_ERR77(0,%1) " stwcx. %0,0,%1\n\ bne- 1b" : "=&r"(tmp) diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 8420abd4ea1c..a7b261440d59 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include @@ -541,9 +540,6 @@ syscall_exit_work: addi r12,r2,TI_FLAGS 3: lwarx r8,0,r12 andc r8,r8,r11 -#ifdef CONFIG_IBM405_ERR77 - dcbt 0,r12 -#endif stwcx. r8,0,r12 bne- 3b @@ -918,9 +914,6 @@ resume_kernel: addi r5,r2,TI_FLAGS 0: lwarx r8,0,r5 andc r8,r8,r11 -#ifdef CONFIG_IBM405_ERR77 - dcbt 0,r5 -#endif stwcx. r8,0,r5 bne- 0b 1: @@ -997,7 +990,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x) mtspr SPRN_XER,r10 mtctr r11 - PPC405_ERR77(0,r1) BEGIN_FTR_SECTION lwarx r11,0,r1 END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) @@ -1066,7 +1058,6 @@ exc_exit_start: lwz r1,GPR1(r1) .globl exc_exit_restart_end exc_exit_restart_end: - PPC405_ERR77_SYNC rfi b . /* prevent prefetch past rfi */ @@ -1109,7 +1100,6 @@ exc_exit_restart_end: lwz r11,_CTR(r1); \ mtspr SPRN_XER,r10; \ mtctr r11; \ - PPC405_ERR77(0,r1); \ stwcx. 
r0,0,r1; /* to clear the reservation */ \ lwz r11,_LINK(r1); \ mtlr r11; \ @@ -1129,7 +1119,6 @@ exc_exit_restart_end: lwz r10,GPR10(r1); \ lwz r11,GPR11(r1); \ lwz r1,GPR1(r1); \ - PPC405_ERR77_SYNC; \ exc_lvl_rfi; \ b .; /* prevent prefetch past exc_lvl_rfi */ diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index a78cacea0be0..75238897093d 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -36,7 +36,6 @@ #include #include #include -#include #include "head_32.h" @@ -487,7 +486,6 @@ _ENTRY(saved_ksp_limit) lwz r12,GPR12(r11) lwz r10,crit_r10@l(0) lwz r11,crit_r11@l(0) - PPC405_ERR77_SYNC rfci b . @@ -569,7 +567,6 @@ finish_tlb_load: mfspr r12, SPRN_SPRG_SCRATCH3 mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r10, SPRN_SPRG_SCRATCH0 - PPC405_ERR77_SYNC rfi /* Should sync shadow TLBs */ b . /* prevent prefetch past rfi */ diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index ebe283476461..e3e5217c9822 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -69,12 +69,6 @@ config PPC4xx_GPIO help Enable gpiolib support for ppc40x based boards -# 40x errata/workaround config symbols, selected by the CPU models above - -# All 405-based cores up until the 405GPR and 405EP have this errata. -config IBM405_ERR77 - bool - config APM8018X bool "APM8018X" depends on 40x -- cgit v1.2.3-59-g8ed1b From 797f4016f6da4a90ac83e32b213b68ff7be3812b Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 May 2020 16:56:03 +0000 Subject: powerpc/40x: Avoid using r12 in TLB miss handlers Let's reduce the number of registers used in TLB miss handlers. We have both r9 and r12 available for any temporary use. r9 is enough, avoid using r12. Signed-off-by: Christophe Leroy Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/7f330e971952abb2645fb9ca4310c0f527e84dcb.1590079969.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/head_40x.S | 70 ++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 37 deletions(-) diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 75238897093d..b584e81f6d19 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -255,9 +255,9 @@ _ENTRY(saved_ksp_limit) mtspr SPRN_SPRG_SCRATCH3, r12 mtspr SPRN_SPRG_SCRATCH4, r9 mfcr r11 - mfspr r12, SPRN_PID + mfspr r9, SPRN_PID mtspr SPRN_SPRG_SCRATCH6, r11 - mtspr SPRN_SPRG_SCRATCH5, r12 + mtspr SPRN_SPRG_SCRATCH5, r9 mfspr r10, SPRN_DEAR /* Get faulting address */ /* If we are faulting a kernel address, we have to use the @@ -280,12 +280,12 @@ _ENTRY(saved_ksp_limit) 4: tophys(r11, r11) rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ - lwz r12, 0(r11) /* Get L1 entry */ - andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ + lwz r11, 0(r11) /* Get L1 entry */ + andi. r9, r11, _PMD_PRESENT /* Check if it points to a PTE page */ beq 2f /* Bail if no table */ - rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ - lwz r11, 0(r12) /* Get Linux PTE */ + rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r11) /* Get Linux PTE */ #ifdef CONFIG_SWAP li r9, _PAGE_PRESENT | _PAGE_ACCESSED #else @@ -301,13 +301,13 @@ _ENTRY(saved_ksp_limit) /* Create TLB tag. This is the faulting address plus a static * set of bits. These are size, valid, E, U0. 
*/ - li r12, 0x00c0 - rlwimi r10, r12, 0, 20, 31 + li r9, 0x00c0 + rlwimi r10, r9, 0, 20, 31 b finish_tlb_load 2: /* Check for possible large-page pmd entry */ - rlwinm. r9, r12, 2, 22, 24 + rlwinm. r9, r11, 2, 22, 24 beq 5f /* Create TLB tag. This is the faulting address, plus a static @@ -315,7 +315,6 @@ _ENTRY(saved_ksp_limit) */ ori r9, r9, 0x40 rlwimi r10, r9, 0, 20, 31 - mr r11, r12 b finish_tlb_load @@ -323,9 +322,9 @@ _ENTRY(saved_ksp_limit) /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ - mfspr r12, SPRN_SPRG_SCRATCH5 + mfspr r9, SPRN_SPRG_SCRATCH5 mfspr r11, SPRN_SPRG_SCRATCH6 - mtspr SPRN_PID, r12 + mtspr SPRN_PID, r9 mtcr r11 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 @@ -343,9 +342,9 @@ _ENTRY(saved_ksp_limit) mtspr SPRN_SPRG_SCRATCH3, r12 mtspr SPRN_SPRG_SCRATCH4, r9 mfcr r11 - mfspr r12, SPRN_PID + mfspr r9, SPRN_PID mtspr SPRN_SPRG_SCRATCH6, r11 - mtspr SPRN_SPRG_SCRATCH5, r12 + mtspr SPRN_SPRG_SCRATCH5, r9 mfspr r10, SPRN_SRR0 /* Get faulting address */ /* If we are faulting a kernel address, we have to use the @@ -368,12 +367,12 @@ _ENTRY(saved_ksp_limit) 4: tophys(r11, r11) rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ - lwz r12, 0(r11) /* Get L1 entry */ - andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ + lwz r11, 0(r11) /* Get L1 entry */ + andi. r9, r11, _PMD_PRESENT /* Check if it points to a PTE page */ beq 2f /* Bail if no table */ - rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ - lwz r11, 0(r12) /* Get Linux PTE */ + rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r11) /* Get Linux PTE */ #ifdef CONFIG_SWAP li r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC #else @@ -389,13 +388,13 @@ _ENTRY(saved_ksp_limit) /* Create TLB tag. This is the faulting address plus a static * set of bits. These are size, valid, E, U0. */ - li r12, 0x00c0 - rlwimi r10, r12, 0, 20, 31 + li r9, 0x00c0 + rlwimi r10, r9, 0, 20, 31 b finish_tlb_load 2: /* Check for possible large-page pmd entry */ - rlwinm. r9, r12, 2, 22, 24 + rlwinm. r9, r11, 2, 22, 24 beq 5f /* Create TLB tag. This is the faulting address, plus a static @@ -403,7 +402,6 @@ _ENTRY(saved_ksp_limit) */ ori r9, r9, 0x40 rlwimi r10, r9, 0, 20, 31 - mr r11, r12 b finish_tlb_load @@ -411,9 +409,9 @@ _ENTRY(saved_ksp_limit) /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ - mfspr r12, SPRN_SPRG_SCRATCH5 + mfspr r9, SPRN_SPRG_SCRATCH5 mfspr r11, SPRN_SPRG_SCRATCH6 - mtspr SPRN_PID, r12 + mtspr SPRN_PID, r9 mtcr r11 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 @@ -529,7 +527,7 @@ WDTException: * miss get to this point to load the TLB. * r10 - TLB_TAG value * r11 - Linux PTE - * r12, r9 - available to use + * r9 - available to use * PID - loaded with proper value when we get here * Upon exit, we reload everything and RFI. * Actually, it will fit now, but oh well.....a common place @@ -538,30 +536,28 @@ WDTException: tlb_4xx_index: .long 0 finish_tlb_load: - /* load the next available TLB index. - */ - lwz r9, tlb_4xx_index@l(0) - addi r9, r9, 1 - andi. r9, r9, (PPC40X_TLB_SIZE-1) - stw r9, tlb_4xx_index@l(0) - -6: /* * Clear out the software-only bits in the PTE to generate the * TLB_DATA value. These are the bottom 2 bits of the RPM, the * top 3 bits of the zone field, and M. */ - li r12, 0x0ce2 - andc r11, r11, r12 + li r9, 0x0ce2 + andc r11, r11, r9 + + /* load the next available TLB index. 
*/ + lwz r9, tlb_4xx_index@l(0) + addi r9, r9, 1 + andi. r9, r9, PPC40X_TLB_SIZE - 1 + stw r9, tlb_4xx_index@l(0) tlbwe r11, r9, TLB_DATA /* Load TLB LO */ tlbwe r10, r9, TLB_TAG /* Load TLB HI */ /* Done...restore registers and get out of here. */ - mfspr r12, SPRN_SPRG_SCRATCH5 + mfspr r9, SPRN_SPRG_SCRATCH5 mfspr r11, SPRN_SPRG_SCRATCH6 - mtspr SPRN_PID, r12 + mtspr SPRN_PID, r9 mtcr r11 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 -- cgit v1.2.3-59-g8ed1b From 3aacaa719b7bf135551cabde2480e8f7bfdf7c7d Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 May 2020 16:56:04 +0000 Subject: powerpc/40x: Don't save CR in SPRN_SPRG_SCRATCH6 We have r12 available, use it to keep CR around and don't save it in SPRN_SPRG_SCRATCH6. Signed-off-by: Christophe Leroy Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/019f314a98c107c4ca46e46c1cf402e9a44114a7.1590079969.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/head_40x.S | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index b584e81f6d19..a22a8209971b 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -254,9 +254,8 @@ _ENTRY(saved_ksp_limit) mtspr SPRN_SPRG_SCRATCH1, r11 mtspr SPRN_SPRG_SCRATCH3, r12 mtspr SPRN_SPRG_SCRATCH4, r9 - mfcr r11 + mfcr r12 mfspr r9, SPRN_PID - mtspr SPRN_SPRG_SCRATCH6, r11 mtspr SPRN_SPRG_SCRATCH5, r9 mfspr r10, SPRN_DEAR /* Get faulting address */ @@ -323,9 +322,8 @@ _ENTRY(saved_ksp_limit) * and call the heavyweights to help us out. */ mfspr r9, SPRN_SPRG_SCRATCH5 - mfspr r11, SPRN_SPRG_SCRATCH6 mtspr SPRN_PID, r9 - mtcr r11 + mtcr r12 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 mfspr r11, SPRN_SPRG_SCRATCH1 @@ -341,9 +339,8 @@ _ENTRY(saved_ksp_limit) mtspr SPRN_SPRG_SCRATCH1, r11 mtspr SPRN_SPRG_SCRATCH3, r12 mtspr SPRN_SPRG_SCRATCH4, r9 - mfcr r11 + mfcr r12 mfspr r9, SPRN_PID - mtspr SPRN_SPRG_SCRATCH6, r11 mtspr SPRN_SPRG_SCRATCH5, r9 mfspr r10, SPRN_SRR0 /* Get faulting address */ @@ -410,9 +407,8 @@ _ENTRY(saved_ksp_limit) * and call the heavyweights to help us out. */ mfspr r9, SPRN_SPRG_SCRATCH5 - mfspr r11, SPRN_SPRG_SCRATCH6 mtspr SPRN_PID, r9 - mtcr r11 + mtcr r12 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 mfspr r11, SPRN_SPRG_SCRATCH1 @@ -556,9 +552,8 @@ finish_tlb_load: /* Done...restore registers and get out of here. 
*/ mfspr r9, SPRN_SPRG_SCRATCH5 - mfspr r11, SPRN_SPRG_SCRATCH6 mtspr SPRN_PID, r9 - mtcr r11 + mtcr r12 mfspr r9, SPRN_SPRG_SCRATCH4 mfspr r12, SPRN_SPRG_SCRATCH3 mfspr r11, SPRN_SPRG_SCRATCH1 -- cgit v1.2.3-59-g8ed1b From bcec081ecc940fc38730b29c743bbee661164161 Mon Sep 17 00:00:00 2001 From: huhai Date: Thu, 21 May 2020 17:26:48 +1000 Subject: powerpc/4xx: Don't unmap NULL mbase Signed-off-by: huhai Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200521072648.1254699-1-mpe@ellerman.id.au --- arch/powerpc/platforms/4xx/pci.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c index e6e2adcc7b64..c13d64c3b019 100644 --- a/arch/powerpc/platforms/4xx/pci.c +++ b/arch/powerpc/platforms/4xx/pci.c @@ -1242,7 +1242,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) if (mbase == NULL) { printk(KERN_ERR "%pOF: Can't map internal config space !", port->node); - goto done; + return; } while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA) @@ -1252,9 +1252,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) } if (attempt) port->link = 1; -done: iounmap(mbase); - } static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = { -- cgit v1.2.3-59-g8ed1b From cb2b53cbffe3c388cd676b63f34e54ceb2643ae2 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 29 Apr 2020 16:56:50 +1000 Subject: powerpc/64s/kuap: Add missing isync to KUAP restore paths Writing the AMR register is documented to require context synchronizing operations before and after, for it to take effect as expected. The KUAP restore at interrupt exit time deliberately avoids the isync after the AMR update because it only needs to take effect after the context synchronizing RFID that soon follows. Add a comment for this. The missing isync before the update doesn't have an obvious justification, and seems it could theoretically allow a rogue user access to leak past the AMR update. Add isyncs for these. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200429065654.1677541-3-npiggin@gmail.com --- arch/powerpc/include/asm/book3s/64/kup-radix.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h index 3bcef989a35d..101d60f16d46 100644 --- a/arch/powerpc/include/asm/book3s/64/kup-radix.h +++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h @@ -16,7 +16,9 @@ #ifdef CONFIG_PPC_KUAP BEGIN_MMU_FTR_SECTION_NESTED(67) ld \gpr, STACK_REGS_KUAP(r1) + isync mtspr SPRN_AMR, \gpr + /* No isync required, see kuap_restore_amr() */ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67) #endif .endm @@ -62,8 +64,15 @@ static inline void kuap_restore_amr(struct pt_regs *regs) { - if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) + if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) { + isync(); mtspr(SPRN_AMR, regs->kuap); + /* + * No isync required here because we are about to RFI back to + * previous context before any user accesses would be made, + * which is a CSI. 
+ */ + } } static inline void kuap_check_amr(void) -- cgit v1.2.3-59-g8ed1b From 579940bb451c2dd33396d2d56ce6ef5d92154b3b Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 29 Apr 2020 16:56:51 +1000 Subject: powerpc/64/kuap: Conditionally restore AMR in interrupt exit The AMR update is made conditional on AMR actually changing, which should be the less common case on most workloads (though kernel page faults on uaccess could be frequent, this doesn't significantly slow down that case). Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200429065654.1677541-4-npiggin@gmail.com --- arch/powerpc/include/asm/book3s/64/kup-radix.h | 22 +++++++++++++++++++--- arch/powerpc/kernel/syscall_64.c | 14 ++++++++++---- 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h index 101d60f16d46..820169bac6c4 100644 --- a/arch/powerpc/include/asm/book3s/64/kup-radix.h +++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h @@ -62,9 +62,9 @@ #include #include -static inline void kuap_restore_amr(struct pt_regs *regs) +static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { - if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) { + if (mmu_has_feature(MMU_FTR_RADIX_KUAP) && unlikely(regs->kuap != amr)) { isync(); mtspr(SPRN_AMR, regs->kuap); /* @@ -75,6 +75,17 @@ static inline void kuap_restore_amr(struct pt_regs *regs) } } +static inline unsigned long kuap_get_and_check_amr(void) +{ + if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) { + unsigned long amr = mfspr(SPRN_AMR); + if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */ + WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED); + return amr; + } + return 0; +} + static inline void kuap_check_amr(void) { if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP)) @@ -151,13 +162,18 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read"); } #else /* CONFIG_PPC_KUAP */ -static inline void kuap_restore_amr(struct pt_regs *regs) +static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { } static inline void kuap_check_amr(void) { } + +static inline unsigned long kuap_get_and_check_amr(void) +{ + return 0; +} #endif /* CONFIG_PPC_KUAP */ #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c index 613da0d0fa8c..79edba3ab312 100644 --- a/arch/powerpc/kernel/syscall_64.c +++ b/arch/powerpc/kernel/syscall_64.c @@ -242,6 +242,10 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned BUG_ON(!FULL_REGS(regs)); BUG_ON(regs->softe != IRQS_ENABLED); + /* + * We don't need to restore AMR on the way back to userspace for KUAP. + * AMR can only have been unlocked if we interrupted the kernel. 
+ */ kuap_check_amr(); local_irq_save(flags); @@ -313,13 +317,14 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign unsigned long *ti_flagsp = ¤t_thread_info()->flags; unsigned long flags; unsigned long ret = 0; + unsigned long amr; if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI))) unrecoverable_exception(regs); BUG_ON(regs->msr & MSR_PR); BUG_ON(!FULL_REGS(regs)); - kuap_check_amr(); + amr = kuap_get_and_check_amr(); if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) { clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp); @@ -367,10 +372,11 @@ again: #endif /* - * We don't need to restore AMR on the way back to userspace for KUAP. - * The value of AMR only matters while we're in the kernel. + * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr, + * which would cause Read-After-Write stalls. Hence, we take the AMR + * value from the check above. */ - kuap_restore_amr(regs); + kuap_restore_amr(regs, amr); return ret; } -- cgit v1.2.3-59-g8ed1b From d4539074b0e9c5fa6508e8c33aaf51abc8ff6e91 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 29 Apr 2020 16:56:54 +1000 Subject: powerpc/64s/kuap: Conditionally restore AMR in kuap_restore_amr asm Similar to the C code change, make the AMR restore conditional on whether the register has changed. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200429065654.1677541-7-npiggin@gmail.com --- arch/powerpc/include/asm/book3s/64/kup-radix.h | 10 +++++++--- arch/powerpc/kernel/entry_64.S | 8 ++++---- arch/powerpc/kernel/exceptions-64s.S | 4 ++-- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h index 820169bac6c4..3ee1ec60be84 100644 --- a/arch/powerpc/include/asm/book3s/64/kup-radix.h +++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h @@ -12,13 +12,17 @@ #ifdef __ASSEMBLY__ -.macro kuap_restore_amr gpr +.macro kuap_restore_amr gpr1, gpr2 #ifdef CONFIG_PPC_KUAP BEGIN_MMU_FTR_SECTION_NESTED(67) - ld \gpr, STACK_REGS_KUAP(r1) + mfspr \gpr1, SPRN_AMR + ld \gpr2, STACK_REGS_KUAP(r1) + cmpd \gpr1, \gpr2 + beq 998f isync - mtspr SPRN_AMR, \gpr + mtspr SPRN_AMR, \gpr2 /* No isync required, see kuap_restore_amr() */ +998: END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67) #endif .endm diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index b3c9f15089b6..9d49338e0c85 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -479,11 +479,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) fast_interrupt_return: _ASM_NOKPROBE_SYMBOL(fast_interrupt_return) kuap_check_amr r3, r4 - ld r4,_MSR(r1) - andi. r0,r4,MSR_PR + ld r5,_MSR(r1) + andi. r0,r5,MSR_PR bne .Lfast_user_interrupt_return - kuap_restore_amr r3 - andi. r0,r4,MSR_RI + kuap_restore_amr r3, r4 + andi. 
r0,r5,MSR_RI li r3,0 /* 0 return value, no EMULATE_STACK_STORE */ bne+ .Lfast_kernel_interrupt_return addi r3,r1,STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 72036082dbaf..e70ebb5c318c 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -971,7 +971,7 @@ EXC_COMMON_BEGIN(system_reset_common) ld r10,SOFTE(r1) stb r10,PACAIRQSOFTMASK(r13) - kuap_restore_amr r10 + kuap_restore_amr r9, r10 EXCEPTION_RESTORE_REGS RFI_TO_USER_OR_KERNEL @@ -2784,7 +2784,7 @@ EXC_COMMON_BEGIN(soft_nmi_common) ld r10,SOFTE(r1) stb r10,PACAIRQSOFTMASK(r13) - kuap_restore_amr r10 + kuap_restore_amr r9, r10 EXCEPTION_RESTORE_REGS hsrr=0 RFI_TO_KERNEL -- cgit v1.2.3-59-g8ed1b From 6984856865b55c9c1ee0814c30296119cd8ba511 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Mon, 6 Apr 2020 13:07:39 +1000 Subject: powerpc/powernv/npu: Clean up compound table group initialisation Re-work the control flow a bit so what's going on is a little clearer. This also ensures the table_group is only initialised once in the P9 case. This shouldn't be a functional change since all the GPU PCI devices should have the same table_group configuration, but it does look strange. Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200406030745.24595-2-oohall@gmail.com --- arch/powerpc/platforms/powernv/npu-dma.c | 46 +++++++++++++++----------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index b95b9e3c4c98..de617549c9a3 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -427,7 +427,7 @@ static void pnv_comp_attach_table_group(struct npu_comp *npucomp, struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) { - struct iommu_table_group *table_group; + struct iommu_table_group *compound_group; struct npu_comp *npucomp; struct pci_dev *gpdev = NULL; struct pci_controller *hose; @@ -446,36 +446,32 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) hose = pci_bus_to_host(npdev->bus); if (hose->npu) { - table_group = &hose->npu->npucomp.table_group; - - if (!table_group->group) { - table_group->ops = &pnv_npu_peers_ops; - iommu_register_group(table_group, - hose->global_number, - pe->pe_number); - } + /* P9 case: compound group is per-NPU (all gpus, all links) */ + npucomp = &hose->npu->npucomp; } else { - /* Create a group for 1 GPU and attached NPUs for POWER8 */ - pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL); - table_group = &pe->npucomp->table_group; - table_group->ops = &pnv_npu_peers_ops; - iommu_register_group(table_group, hose->global_number, - pe->pe_number); + /* P8 case: Compound group is per-GPU (1 gpu, 2 links) */ + npucomp = pe->npucomp = kzalloc(sizeof(*npucomp), GFP_KERNEL); } - /* Steal capabilities from a GPU PE */ - table_group->max_dynamic_windows_supported = - pe->table_group.max_dynamic_windows_supported; - table_group->tce32_start = pe->table_group.tce32_start; - table_group->tce32_size = pe->table_group.tce32_size; - table_group->max_levels = pe->table_group.max_levels; - if (!table_group->pgsizes) - table_group->pgsizes = pe->table_group.pgsizes; + compound_group = &npucomp->table_group; + if (!compound_group->group) { + compound_group->ops = &pnv_npu_peers_ops; + iommu_register_group(compound_group, 
hose->global_number, + pe->pe_number); + + /* Steal capabilities from a GPU PE */ + compound_group->max_dynamic_windows_supported = + pe->table_group.max_dynamic_windows_supported; + compound_group->tce32_start = pe->table_group.tce32_start; + compound_group->tce32_size = pe->table_group.tce32_size; + compound_group->max_levels = pe->table_group.max_levels; + if (!compound_group->pgsizes) + compound_group->pgsizes = pe->table_group.pgsizes; + } - npucomp = container_of(table_group, struct npu_comp, table_group); pnv_comp_attach_table_group(npucomp, pe); - return table_group; + return compound_group; } struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe) -- cgit v1.2.3-59-g8ed1b From 6cff91b2b97b1b40a52971c9b1e99980dd49fd54 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Mon, 6 Apr 2020 13:07:40 +1000 Subject: powerpc/powernv/iov: Don't add VFs to iommu group during PE config In pnv_ioda_setup_vf_PE() we register an iommu group for the VF PE then call pnv_ioda_setup_bus_iommu_group() to add devices to that group. However, this function is called before the VFs are scanned so there's no devices to add. Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200406030745.24595-3-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index d1a16ebc31bb..9a8438f86c11 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1622,7 +1622,6 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) #ifdef CONFIG_IOMMU_API iommu_register_group(&pe->table_group, pe->phb->hose->global_number, pe->pe_number); - pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL); #endif } } -- cgit v1.2.3-59-g8ed1b From 9b9408c55935ecc3b1c27b3eeb5a507394113cbb Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Mon, 6 Apr 2020 13:07:41 +1000 Subject: powerpc/powernv/pci: Register iommu group at PE DMA setup Move the registration of IOMMU groups out of the post-phb init fixup and into when we configure DMA for a PE. For most devices this doesn't result in any functional changes, but for NVLink attached GPUs it requires a bit of care. When the GPU is probed an IOMMU group would be created for the PE that contains it. We need to ensure that group is removed before we add the PE to the compound group that's used to keep the translations see by the PCIe and NVLink buses the same. No functional changes. Probably. Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200406030745.24595-4-oohall@gmail.com --- arch/powerpc/platforms/powernv/npu-dma.c | 9 +++++++++ arch/powerpc/platforms/powernv/pci-ioda.c | 16 ++++++---------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index de617549c9a3..4fbbdfa8b327 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -469,6 +469,15 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) compound_group->pgsizes = pe->table_group.pgsizes; } + /* + * I'm not sure this is strictly required, but it's probably a good idea + * since the table_group for the PE is going to be attached to the + * compound table group. 
If we leave the PE's iommu group active then + * we might have the same table_group being modifiable via two sepeate + * iommu groups. + */ + iommu_group_put(pe->table_group.group); + pnv_comp_attach_table_group(npucomp, pe); return compound_group; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 9a8438f86c11..9e0776a06e60 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1619,10 +1619,6 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) } pnv_pci_ioda2_setup_dma_pe(phb, pe); -#ifdef CONFIG_IOMMU_API - iommu_register_group(&pe->table_group, - pe->phb->hose->global_number, pe->pe_number); -#endif } } @@ -2661,9 +2657,6 @@ static void pnv_pci_ioda_setup_iommu_api(void) continue; table_group = &pe->table_group; - iommu_register_group(&pe->table_group, - pe->phb->hose->global_number, - pe->pe_number); } pnv_ioda_setup_bus_iommu_group(pe, table_group, pe->pbus); @@ -2748,14 +2741,17 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, IOMMU_TABLE_GROUP_MAX_TABLES; pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS; pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb); -#ifdef CONFIG_IOMMU_API - pe->table_group.ops = &pnv_pci_ioda2_ops; -#endif rc = pnv_pci_ioda2_setup_default_config(pe); if (rc) return; +#ifdef CONFIG_IOMMU_API + pe->table_group.ops = &pnv_pci_ioda2_ops; + iommu_register_group(&pe->table_group, phb->hose->global_number, + pe->pe_number); +#endif + if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) pnv_ioda_setup_bus_dma(pe, pe->pbus); } -- cgit v1.2.3-59-g8ed1b From 84d8cc076723058cc294f4360db6ff7758c25b74 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Mon, 6 Apr 2020 13:07:42 +1000 Subject: powerpc/powernv/pci: Add device to iommu group during dma_dev_setup() Historically adding devices to their respective iommu group has been handled by the post-init phb fixup for most devices. This was done because: 1) The IOMMU group is tied to the PE (usually) so we can only setup the iommu groups after we've done resource allocation since BAR location determines the device's PE, and: 2) The sysfs directory for the pci_dev needs to be available since iommu_add_device() wants to add an attribute for the iommu group. However, since commit 30d87ef8b38d ("powerpc/pci: Fix pcibios_setup_device() ordering") both conditions are met when hose->ops->dma_dev_setup() is called so there's no real need to do this in the fixup. Moving the call to iommu_add_device() into pnv_pci_ioda_dma_setup_dev() is a nice cleanup since it puts all the per-device IOMMU setup into one place. It also results in all (non-nvlink) devices getting their iommu group via a common path rather than relying on the bus notifier hack in pnv_tce_iommu_bus_notifier() to handle the adding VFs and hotplugged devices to their group. 
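Condensed from the pci-ioda.c hunk below, the per-device path after this patch looks roughly like the following sketch (hypothetical function name, not the verbatim kernel code):

static void dma_dev_setup_sketch(struct pnv_ioda_pe *pe, struct pci_dev *pdev)
{
    /* attach the device to its PE's DMA window... */
    pdev->dev.archdata.dma_offset = pe->tce_bypass_base;
    set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);

    /* ...and join the PE's IOMMU group in the same place.
     * PEs with a DMA weight of zero never registered a group. */
    if (pe->table_group.group)
        iommu_add_device(&pe->table_group, &pdev->dev);
}

Because this runs for every device as its DMA is configured, VFs and hotplugged devices join their group through exactly the same path as devices present at boot.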
Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200406030745.24595-5-oohall@gmail.com --- arch/powerpc/platforms/powernv/npu-dma.c | 8 ++++++ arch/powerpc/platforms/powernv/pci-ioda.c | 47 +++++++++---------------------- arch/powerpc/platforms/powernv/pci.c | 20 ------------- 3 files changed, 21 insertions(+), 54 deletions(-) diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 4fbbdfa8b327..df27b8d7e78f 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -469,6 +469,12 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) compound_group->pgsizes = pe->table_group.pgsizes; } + /* + * The gpu would have been added to the iommu group that's created + * for the PE. Pull it out now. + */ + iommu_del_device(&gpdev->dev); + /* * I'm not sure this is strictly required, but it's probably a good idea * since the table_group for the PE is going to be attached to the @@ -478,7 +484,9 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) */ iommu_group_put(pe->table_group.group); + /* now put the GPU into the compound group */ pnv_comp_attach_table_group(npucomp, pe); + iommu_add_device(compound_group, &gpdev->dev); return compound_group; } diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 9e0776a06e60..ad332948fc9a 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1774,12 +1774,10 @@ static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev) WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); pdev->dev.archdata.dma_offset = pe->tce_bypass_base; set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]); - /* - * Note: iommu_add_device() will fail here as - * for physical PE: the device is already added by now; - * for virtual PE: sysfs entries are not ready yet and - * tce_iommu_bus_notifier will add the device to a group later. - */ + + /* PEs with a DMA weight of zero won't have a group */ + if (pe->table_group.group) + iommu_add_device(&pe->table_group, &pdev->dev); } /* @@ -2628,39 +2626,20 @@ static void pnv_pci_ioda_setup_iommu_api(void) struct pnv_ioda_pe *pe; /* - * There are 4 types of PEs: - * - PNV_IODA_PE_BUS: a downstream port with an adapter, - * created from pnv_pci_setup_bridge(); - * - PNV_IODA_PE_BUS_ALL: a PCI-PCIX bridge with devices behind it, - * created from pnv_pci_setup_bridge(); - * - PNV_IODA_PE_VF: a SRIOV virtual function, - * created from pnv_pcibios_sriov_enable(); - * - PNV_IODA_PE_DEV: an NPU or OCAPI device, - * created from pnv_pci_ioda_fixup(). + * For non-nvlink devices the IOMMU group is registered when the PE is + * configured and devices are added to the group when the per-device + * DMA setup is run. That's done in hose->ops.dma_dev_setup() which is + * only initialise for "normal" IODA PHBs. * - * Normally a PE is represented by an IOMMU group, however for - * devices with side channels the groups need to be more strict. + * For NVLink devices we need to ensure the NVLinks and the GPU end up + * in the same IOMMU group, so that's handled here. 
*/ list_for_each_entry(hose, &hose_list, list_node) { phb = hose->private_data; - if (phb->type == PNV_PHB_NPU_NVLINK || - phb->type == PNV_PHB_NPU_OCAPI) - continue; - - list_for_each_entry(pe, &phb->ioda.pe_list, list) { - struct iommu_table_group *table_group; - - table_group = pnv_try_setup_npu_table_group(pe); - if (!table_group) { - if (!pnv_pci_ioda_pe_dma_weight(pe)) - continue; - - table_group = &pe->table_group; - } - pnv_ioda_setup_bus_iommu_group(pe, table_group, - pe->pbus); - } + if (phb->type == PNV_PHB_IODA2) + list_for_each_entry(pe, &phb->ioda.pe_list, list) + pnv_try_setup_npu_table_group(pe); } /* diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 5bf818246339..091fe1cf386b 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -955,28 +955,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; - struct pci_dev *pdev; - struct pci_dn *pdn; - struct pnv_ioda_pe *pe; - struct pci_controller *hose; - struct pnv_phb *phb; switch (action) { - case BUS_NOTIFY_ADD_DEVICE: - pdev = to_pci_dev(dev); - pdn = pci_get_pdn(pdev); - hose = pci_bus_to_host(pdev->bus); - phb = hose->private_data; - - WARN_ON_ONCE(!phb); - if (!pdn || pdn->pe_number == IODA_INVALID_PE || !phb) - return 0; - - pe = &phb->ioda.pe_array[pdn->pe_number]; - if (!pe->table_group.group) - return 0; - iommu_add_device(&pe->table_group, dev); - return 0; case BUS_NOTIFY_DEL_DEVICE: iommu_del_device(dev); return 0; -- cgit v1.2.3-59-g8ed1b From f39b8b10fcc5d4617d2be5f2910e017a55444b43 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Mon, 6 Apr 2020 13:07:43 +1000 Subject: powerpc/powernv/pci: Delete old iommu recursive iommu setup No longer used. 
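For reference, the control flow being deleted amounted to the recursive walk sketched below (paraphrased with a hypothetical name; the exact code is in the diff that follows):

/* Paraphrased sketch of the removed recursion: attach every device on
 * the PE's bus, and for PNV_IODA_PE_BUS_ALL PEs descend into any
 * subordinate buses behind bridges as well. */
static void add_bus_devices_sketch(struct iommu_table_group *table_group,
                                   struct pci_bus *bus, bool walk_bridges)
{
    struct pci_dev *dev;

    list_for_each_entry(dev, &bus->devices, bus_list) {
        iommu_add_device(table_group, &dev->dev);

        if (walk_bridges && dev->subordinate)    /* recurse into child bus */
            add_bus_devices_sketch(table_group,
                                   dev->subordinate, walk_bridges);
    }
}

With the previous patch attaching each device to its group during per-device DMA setup, nothing invokes this walk any more.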
Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200406030745.24595-6-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 32 ------------------------------- 1 file changed, 32 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index ad332948fc9a..7547789598b9 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1550,11 +1550,6 @@ void pnv_pci_sriov_disable(struct pci_dev *pdev) static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe); -#ifdef CONFIG_IOMMU_API -static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe, - struct iommu_table_group *table_group, struct pci_bus *bus); - -#endif static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) { struct pci_bus *bus; @@ -2590,33 +2585,6 @@ static struct iommu_table_group_ops pnv_pci_ioda2_ops = { .release_ownership = pnv_ioda2_release_ownership, }; -static void pnv_ioda_setup_bus_iommu_group_add_devices(struct pnv_ioda_pe *pe, - struct iommu_table_group *table_group, - struct pci_bus *bus) -{ - struct pci_dev *dev; - - list_for_each_entry(dev, &bus->devices, bus_list) { - iommu_add_device(table_group, &dev->dev); - - if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) - pnv_ioda_setup_bus_iommu_group_add_devices(pe, - table_group, dev->subordinate); - } -} - -static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe, - struct iommu_table_group *table_group, struct pci_bus *bus) -{ - - if (pe->flags & PNV_IODA_PE_DEV) - iommu_add_device(table_group, &pe->pdev->dev); - - if ((pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) || bus) - pnv_ioda_setup_bus_iommu_group_add_devices(pe, table_group, - bus); -} - static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb); static void pnv_pci_ioda_setup_iommu_api(void) -- cgit v1.2.3-59-g8ed1b From 96e2006a9dbc02cb1c103521405d457438a2e260 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Mon, 6 Apr 2020 13:07:44 +1000 Subject: powerpc/powernv/pci: Move tce size parsing to pci-ioda-tce.c Move it in with the rest of the TCE wrangling rather than carting around a static prototype in pci-ioda.c Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200406030745.24595-7-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda-tce.c | 28 +++++++++++++++++++++++++ arch/powerpc/platforms/powernv/pci-ioda.c | 30 --------------------------- arch/powerpc/platforms/powernv/pci.h | 2 ++ 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c index 5dc6847d5f4c..f923359d8afc 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c @@ -17,6 +17,34 @@ #include #include "pci.h" +unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb) +{ + struct pci_controller *hose = phb->hose; + struct device_node *dn = hose->dn; + unsigned long mask = 0; + int i, rc, count; + u32 val; + + count = of_property_count_u32_elems(dn, "ibm,supported-tce-sizes"); + if (count <= 0) { + mask = SZ_4K | SZ_64K; + /* Add 16M for POWER8 by default */ + if (cpu_has_feature(CPU_FTR_ARCH_207S) && + !cpu_has_feature(CPU_FTR_ARCH_300)) + mask |= SZ_16M | SZ_256M; + return mask; + } + + for (i = 0; i < count; i++) { + rc = 
of_property_read_u32_index(dn, "ibm,supported-tce-sizes", + i, &val); + if (rc == 0) + mask |= 1ULL << val; + } + + return mask; +} + void pnv_pci_setup_iommu_table(struct iommu_table *tbl, void *tce_mem, u64 tce_size, u64 dma_offset, unsigned int page_shift) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 7547789598b9..1fc4980e3776 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2585,8 +2585,6 @@ static struct iommu_table_group_ops pnv_pci_ioda2_ops = { .release_ownership = pnv_ioda2_release_ownership, }; -static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb); - static void pnv_pci_ioda_setup_iommu_api(void) { struct pci_controller *hose; @@ -2638,34 +2636,6 @@ static void pnv_pci_ioda_setup_iommu_api(void) static void pnv_pci_ioda_setup_iommu_api(void) { }; #endif -static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb) -{ - struct pci_controller *hose = phb->hose; - struct device_node *dn = hose->dn; - unsigned long mask = 0; - int i, rc, count; - u32 val; - - count = of_property_count_u32_elems(dn, "ibm,supported-tce-sizes"); - if (count <= 0) { - mask = SZ_4K | SZ_64K; - /* Add 16M for POWER8 by default */ - if (cpu_has_feature(CPU_FTR_ARCH_207S) && - !cpu_has_feature(CPU_FTR_ARCH_300)) - mask |= SZ_16M | SZ_256M; - return mask; - } - - for (i = 0; i < count; i++) { - rc = of_property_read_u32_index(dn, "ibm,supported-tce-sizes", - i, &val); - if (rc == 0) - mask |= 1ULL << val; - } - - return mask; -} - static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index d3bbdeab3a32..0c5845a1f05d 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -244,4 +244,6 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl, void *tce_mem, u64 tce_size, u64 dma_offset, unsigned int page_shift); +extern unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb); + #endif /* __POWERNV_PCI_H */ -- cgit v1.2.3-59-g8ed1b From 03b7bf341c18ff19129cc2825b62bb0e212463f1 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Mon, 6 Apr 2020 13:07:45 +1000 Subject: powerpc/powernv/npu: Move IOMMU group setup into npu-dma.c The NVLink IOMMU group setup is only relevant to NVLink devices so move it into the NPU containment zone. This lets us remove some prototypes in pci.h and make some function definitions static.
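For reference, the pgsizes mask produced by pnv_ioda_parse_tce_sizes() (moved above, and consumed by the NPU code below) encodes each supported TCE page size as a set bit at the corresponding page shift, e.g. SZ_4K | SZ_64K sets bits 12 and 16. A minimal sketch of decoding such a mask, purely illustrative and not part of this patch (the helper name is hypothetical):

/* Hypothetical helper: pick the largest supported TCE page shift
 * that does not exceed the wanted shift. Bit N set in mask means
 * a 2^N byte TCE page size is supported. */
static unsigned int biggest_tce_shift(unsigned long mask, unsigned int want)
{
	unsigned int shift;

	/* Walk down from the wanted shift to the smallest size (4K = 2^12). */
	for (shift = want; shift >= 12; shift--)
		if (mask & (1UL << shift))
			return shift;

	return 0;	/* no supported size fits */
}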
Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200406030745.24595-8-oohall@gmail.com --- arch/powerpc/platforms/powernv/npu-dma.c | 54 ++++++++++++++++++++++++++-- arch/powerpc/platforms/powernv/pci-ioda.c | 60 ++++--------------------------- arch/powerpc/platforms/powernv/pci.h | 6 +--- 3 files changed, 60 insertions(+), 60 deletions(-) diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index df27b8d7e78f..abeaa533b976 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -15,6 +15,7 @@ #include #include +#include #include #include "pci.h" @@ -425,7 +426,8 @@ static void pnv_comp_attach_table_group(struct npu_comp *npucomp, ++npucomp->pe_num; } -struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) +static struct iommu_table_group * + pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) { struct iommu_table_group *compound_group; struct npu_comp *npucomp; @@ -491,7 +493,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) return compound_group; } -struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe) +static struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe) { struct iommu_table_group *table_group; struct npu_comp *npucomp; @@ -534,6 +536,54 @@ struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe) return table_group; } + +void pnv_pci_npu_setup_iommu_groups(void) +{ + struct pci_controller *hose; + struct pnv_phb *phb; + struct pnv_ioda_pe *pe; + + /* + * For non-nvlink devices the IOMMU group is registered when the PE is + * configured and devices are added to the group when the per-device + * DMA setup is run. That's done in hose->ops.dma_dev_setup() which is + * only initialise for "normal" IODA PHBs. + * + * For NVLink devices we need to ensure the NVLinks and the GPU end up + * in the same IOMMU group, so that's handled here. + */ + list_for_each_entry(hose, &hose_list, list_node) { + phb = hose->private_data; + + if (phb->type == PNV_PHB_IODA2) + list_for_each_entry(pe, &phb->ioda.pe_list, list) + pnv_try_setup_npu_table_group(pe); + } + + /* + * Now we have all PHBs discovered, time to add NPU devices to + * the corresponding IOMMU groups. + */ + list_for_each_entry(hose, &hose_list, list_node) { + unsigned long pgsizes; + + phb = hose->private_data; + + if (phb->type != PNV_PHB_NPU_NVLINK) + continue; + + pgsizes = pnv_ioda_parse_tce_sizes(phb); + list_for_each_entry(pe, &phb->ioda.pe_list, list) { + /* + * IODA2 bridges get this set up from + * pci_controller_ops::setup_bridge but NPU bridges + * do not have this hook defined so we do it here. 
+ */ + pe->table_group.pgsizes = pgsizes; + pnv_npu_compound_attach(pe); + } + } +} #endif /* CONFIG_IOMMU_API */ int pnv_npu2_init(struct pci_controller *hose) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 1fc4980e3776..6d28c6ca6ff5 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1288,7 +1288,7 @@ static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus) pnv_ioda_setup_npu_PE(pdev); } -static void pnv_pci_ioda_setup_PEs(void) +static void pnv_pci_ioda_setup_nvlink(void) { struct pci_controller *hose; struct pnv_phb *phb; @@ -1312,6 +1312,11 @@ static void pnv_pci_ioda_setup_PEs(void) list_for_each_entry(pe, &phb->ioda.pe_list, list) pnv_npu2_map_lpar(pe, MSR_DR | MSR_PR | MSR_HV); } + +#ifdef CONFIG_IOMMU_API + /* setup iommu groups so we can do nvlink pass-thru */ + pnv_pci_npu_setup_iommu_groups(); +#endif } #ifdef CONFIG_PCI_IOV @@ -2584,56 +2589,6 @@ static struct iommu_table_group_ops pnv_pci_ioda2_ops = { .take_ownership = pnv_ioda2_take_ownership, .release_ownership = pnv_ioda2_release_ownership, }; - -static void pnv_pci_ioda_setup_iommu_api(void) -{ - struct pci_controller *hose; - struct pnv_phb *phb; - struct pnv_ioda_pe *pe; - - /* - * For non-nvlink devices the IOMMU group is registered when the PE is - * configured and devices are added to the group when the per-device - * DMA setup is run. That's done in hose->ops.dma_dev_setup() which is - * only initialise for "normal" IODA PHBs. - * - * For NVLink devices we need to ensure the NVLinks and the GPU end up - * in the same IOMMU group, so that's handled here. - */ - list_for_each_entry(hose, &hose_list, list_node) { - phb = hose->private_data; - - if (phb->type == PNV_PHB_IODA2) - list_for_each_entry(pe, &phb->ioda.pe_list, list) - pnv_try_setup_npu_table_group(pe); - } - - /* - * Now we have all PHBs discovered, time to add NPU devices to - * the corresponding IOMMU groups. - */ - list_for_each_entry(hose, &hose_list, list_node) { - unsigned long pgsizes; - - phb = hose->private_data; - - if (phb->type != PNV_PHB_NPU_NVLINK) - continue; - - pgsizes = pnv_ioda_parse_tce_sizes(phb); - list_for_each_entry(pe, &phb->ioda.pe_list, list) { - /* - * IODA2 bridges get this set up from - * pci_controller_ops::setup_bridge but NPU bridges - * do not have this hook defined so we do it here. 
- */ + pe->table_group.pgsizes = pgsizes; + pnv_npu_compound_attach(pe); + } + } -} -#else /* !CONFIG_IOMMU_API */ -static void pnv_pci_ioda_setup_iommu_api(void) { }; #endif static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, @@ -3132,8 +3087,7 @@ static void pnv_pci_enable_bridges(void) static void pnv_pci_ioda_fixup(void) { - pnv_pci_ioda_setup_PEs(); - pnv_pci_ioda_setup_iommu_api(); + pnv_pci_ioda_setup_nvlink(); pnv_pci_ioda_create_dbgfs(); pnv_pci_enable_bridges(); diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 0c5845a1f05d..20941ef2706e 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -209,11 +209,7 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, /* Nvlink functions */ extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass); extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm); -extern struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe); -extern struct iommu_table_group *pnv_try_setup_npu_table_group( - struct pnv_ioda_pe *pe); -extern struct iommu_table_group *pnv_npu_compound_attach( - struct pnv_ioda_pe *pe); +extern void pnv_pci_npu_setup_iommu_groups(void); /* pci-ioda-tce.c */ #define POWERNV_IOMMU_DEFAULT_LEVELS 2 -- cgit v1.2.3-59-g8ed1b From e5500ab657c51bec5af8dcf564a096de48e7a132 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 8 Apr 2020 21:22:13 +1000 Subject: powerpc/powernv: Add a print indicating when an IODA PE is released Quite useful to know in some cases. Signed-off-by: Oliver O'Halloran Reviewed-by: Sam Bobroff Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200408112213.5549-1-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 6d28c6ca6ff5..9902f2bea0ec 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -3465,6 +3465,8 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe) struct pnv_phb *phb = pe->phb; struct pnv_ioda_pe *slave, *tmp; + pe_info(pe, "Releasing PE\n"); + mutex_lock(&phb->ioda.pe_list_mutex); list_del(&pe->list); mutex_unlock(&phb->ioda.pe_list_mutex); -- cgit v1.2.3-59-g8ed1b From 9d0879a2dbc3d0c15f8c71490079c1c38f9f3800 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 15 Apr 2020 09:35:02 +1000 Subject: powerpc/powernv/pci: Add an explanation for PNV_IODA_PE_BUS_ALL It's pretty obscure and confused me for a long time so I figured it's worth documenting properly. Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200414233502.758-1-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 20941ef2706e..fc05f9b5caed 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -33,6 +33,24 @@ enum pnv_phb_model { #define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */ #define PNV_IODA_PE_VF (1 << 5) /* PE for one VF */ +/* + * A brief note on PNV_IODA_PE_BUS_ALL + * + * This is needed because of the behaviour of PCIe-to-PCI bridges. The PHB uses + * the Requester ID field of the PCIe request header to determine the device + * (and PE) that initiated a DMA.
In legacy PCI individual memory read/write + * requests aren't tagged with the RID. To work around this the PCIe-to-PCI + * bridge will use (secondary_bus_no << 8) | 0x00 as the RID on the PCIe side. + * + * PCIe-to-X bridges have a similar issue even though PCI-X requests also have + * a RID in the transaction header. The PCIe-to-X bridge is permitted to "take + * ownership" of a transaction by a PCI-X device when forwarding it to the PCIe + * side of the bridge. + * + * To work around these problems we use the BUS_ALL flag since every subordinate + * bus of the bridge should go into the same PE. + */ + /* Indicates operations are frozen for a PE: MMIO in PESTA & DMA in PESTB. */ #define PNV_IODA_STOPPED_STATE 0x8000000000000000 -- cgit v1.2.3-59-g8ed1b From a8d7d5fc2e1672924a391aa37ef8c02d1ec84a4e Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Fri, 17 Apr 2020 17:35:05 +1000 Subject: powerpc/powernv/pci: Add helper to find ioda_pe from BDFN For each PHB we maintain a reverse-map that can be used to find the PE that a BDFN is currently mapped to. Add a helper for doing this lookup so we can check if a PE has been configured without looking at pdn->pe_number. Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200417073508.30356-2-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 10 ++++++++++ arch/powerpc/platforms/powernv/pci.h | 1 + 2 files changed, 11 insertions(+) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 9902f2bea0ec..88d9bdc46373 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -660,6 +660,16 @@ static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no) return state; } +struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn) +{ + int pe_number = phb->ioda.pe_rmap[bdfn]; + + if (pe_number == IODA_INVALID_PE) + return NULL; + + return &phb->ioda.pe_array[pe_number]; +} + struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index fc05f9b5caed..83d40a06e938 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -208,6 +208,7 @@ extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option); extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type); extern void pnv_teardown_msi_irqs(struct pci_dev *pdev); +extern struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn); extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev); extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq); extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift, -- cgit v1.2.3-59-g8ed1b From dc3d8f85bb571c3640ebba24b82a527cf2cb3f24 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Fri, 17 Apr 2020 17:35:06 +1000 Subject: powerpc/powernv/pci: Re-work bus PE configuration For normal PHBs IODA PEs are handled on a per-bus basis so all the devices on that bus will share a PE. Which PE specifically is determined by the location of the MMIO BARs for the devices on the bus, so we can't actually configure the bus PEs until after MMIO resources are allocated. As a result PEs are currently configured by pcibios_setup_bridge(), which is called just before the bridge windows are programmed into the bus' parent bridge.
Configuring the bus PE here causes a few problems: 1. The root bus doesn't have a parent bridge so setting up the PE for the root bus requires some hacks. 2. The PELT-V isn't set up correctly because pnv_ioda_set_peltv() assumes that PEs will be configured in root-to-leaf order. This assumption is broken because resource assignment is performed depth-first so the leaf bridges are set up before their parents are. The hack mentioned in 1) results in the "correct" PELT-V for busses immediately below the root port, but not for devices below a switch. 3. It's possible to break the sysfs PCI rescan feature by removing all the devices on a bus. When the last device is removed from a PE it will be de-configured. Rescanning the devices on a bus does not cause the bridge to be reconfigured, rendering the devices on that bus unusable. We can address most of these problems by moving the PE setup out of pcibios_setup_bridge() and into pcibios_bus_add_device(). This fixes 1) and 2) because pcibios_bus_add_device() is called on each device in root-to-leaf order so PEs for parent buses will always be configured before their children. It also fixes 3) by ensuring the PE is configured before initialising DMA for the device. In the event the PE was de-configured due to removing all the devices in that PE it will now be reconfigured when a new device is added, since there's no dependency on the bridge_setup() hook being called. Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200417073508.30356-3-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 81 ++++++++++++------------------- 1 file changed, 30 insertions(+), 51 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 88d9bdc46373..d9a870355e79 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -51,6 +51,7 @@ static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK", "NPU_OCAPI" }; static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable); +static void pnv_pci_configure_bus(struct pci_bus *bus); void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, const char *fmt, ...) @@ -1120,34 +1121,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) return pe; } -static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) -{ - struct pci_dev *dev; - - list_for_each_entry(dev, &bus->devices, bus_list) { - struct pci_dn *pdn = pci_get_pdn(dev); - - if (pdn == NULL) { - pr_warn("%s: No device node associated with device !\n", - pci_name(dev)); - continue; - } - - /* - * In partial hotplug case, the PCI device might be still - * associated with the PE and needn't attach it to the PE - * again. - */ - if (pdn->pe_number != IODA_INVALID_PE) - continue; - - pe->device_count++; - pdn->pe_number = pe->pe_number; - if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) - pnv_ioda_setup_same_PE(dev->subordinate, pe); - } -} - /* * There're 2 types of PCI bus sensitive PEs: One that is compromised of * single PCI bus.
Another one that contains the primary PCI bus and its @@ -1168,7 +1141,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) pe_num = phb->ioda.pe_rmap[bus->number << 8]; if (pe_num != IODA_INVALID_PE) { pe = &phb->ioda.pe_array[pe_num]; - pnv_ioda_setup_same_PE(bus, pe); return NULL; } @@ -1212,9 +1184,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) return NULL; } - /* Associate it with all child devices */ - pnv_ioda_setup_same_PE(bus, pe); - /* Put PE to the list */ list_add_tail(&pe->list, &phb->ioda.pe_list); @@ -1772,15 +1741,32 @@ static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev) struct pci_dn *pdn = pci_get_pdn(pdev); struct pnv_ioda_pe *pe; - /* - * The function can be called while the PE# - * hasn't been assigned. Do nothing for the - * case. - */ - if (!pdn || pdn->pe_number == IODA_INVALID_PE) - return; + /* Check if the BDFN for this device is associated with a PE yet */ + pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8)); + if (!pe) { + /* VF PEs should be pre-configured in pnv_pci_sriov_enable() */ + if (WARN_ON(pdev->is_virtfn)) + return; + + pnv_pci_configure_bus(pdev->bus); + pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8)); + pci_info(pdev, "Configured PE#%x\n", pe ? pe->pe_number : 0xfffff); + + + /* + * If we can't setup the IODA PE something has gone horribly + * wrong and we can't enable DMA for the device. + */ + if (WARN_ON(!pe)) + return; + } else { + pci_info(pdev, "Added to existing PE#%x\n", pe->pe_number); + } + + if (pdn) + pdn->pe_number = pe->pe_number; + pe->device_count++; - pe = &phb->ioda.pe_array[pdn->pe_number]; WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); pdev->dev.archdata.dma_offset = pe->tce_bypass_base; set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]); @@ -2300,9 +2286,6 @@ found: pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift; iommu_init_table(tbl, phb->hose->node, 0, 0); - if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) - pnv_ioda_setup_bus_dma(pe, pe->pbus); - return; fail: /* XXX Failure: Try to fallback to 64-bit only ? 
*/ @@ -2633,9 +2616,6 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, iommu_register_group(&pe->table_group, phb->hose->global_number, pe->pe_number); #endif - - if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) - pnv_ioda_setup_bus_dma(pe, pe->pbus); } int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq) @@ -3209,16 +3189,15 @@ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus, } } -static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type) +static void pnv_pci_configure_bus(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); struct pnv_phb *phb = hose->private_data; struct pci_dev *bridge = bus->self; struct pnv_ioda_pe *pe; - bool all = (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE); + bool all = (bridge && pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE); - /* Extend bridge's windows if necessary */ - pnv_pci_fixup_bridge_resources(bus, type); + dev_info(&bus->dev, "Configuring PE for bus\n"); /* The PE for root bus should be realized before any one else */ if (!phb->ioda.root_pe_populated) { @@ -3593,7 +3572,7 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { .enable_device_hook = pnv_pci_enable_device_hook, .release_device = pnv_pci_release_device, .window_alignment = pnv_pci_window_alignment, - .setup_bridge = pnv_pci_setup_bridge, + .setup_bridge = pnv_pci_fixup_bridge_resources, .reset_secondary_bus = pnv_pci_reset_secondary_bus, .shutdown = pnv_pci_ioda_shutdown, }; -- cgit v1.2.3-59-g8ed1b From 718d249aeadff058f79c2e6b25212dd45bd711ae Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Fri, 17 Apr 2020 17:35:07 +1000 Subject: powerpc/powernv/pci: Reserve the root bus PE during init Do this once during boot rather than on the fly, and drop the janky "populated" logic. Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200417073508.30356-4-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 26 +++++++++----------------- arch/powerpc/platforms/powernv/pci.h | 1 - 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index d9a870355e79..c4f981cd3cb0 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1145,8 +1145,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) } /* PE number for root bus should have been reserved */ - if (pci_is_root_bus(bus) && - phb->ioda.root_pe_idx != IODA_INVALID_PE) + if (pci_is_root_bus(bus)) pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx]; /* Check if PE is determined by M64 */ @@ -3199,15 +3198,6 @@ static void pnv_pci_configure_bus(struct pci_bus *bus) dev_info(&bus->dev, "Configuring PE for bus\n"); - /* The PE for root bus should be realized before any one else */ - if (!phb->ioda.root_pe_populated) { - pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false); - if (pe) { - phb->ioda.root_pe_idx = pe->pe_number; - phb->ioda.root_pe_populated = true; - } - } - /* Don't assign PE to PCI bus, which doesn't have subordinate devices */ if (list_empty(&bus->devices)) return; @@ -3490,11 +3480,10 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe) * that it can be populated again in PCI hot add path. The PE * shouldn't be destroyed as it's the global reserved resource.
*/ - if (phb->ioda.root_pe_populated && - phb->ioda.root_pe_idx == pe->pe_number) - phb->ioda.root_pe_populated = false; - else - pnv_ioda_free_pe(pe); + if (phb->ioda.root_pe_idx == pe->pe_number) + return; + + pnv_ioda_free_pe(pe); } static void pnv_pci_release_device(struct pci_dev *pdev) @@ -3602,6 +3591,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, struct pnv_phb *phb; unsigned long size, m64map_off, m32map_off, pemap_off; unsigned long iomap_off = 0, dma32map_off = 0; + struct pnv_ioda_pe *root_pe; struct resource r; const __be64 *prop64; const __be32 *prop32; @@ -3769,7 +3759,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1; pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx); } else { - phb->ioda.root_pe_idx = IODA_INVALID_PE; + /* otherwise just allocate one */ + root_pe = pnv_ioda_alloc_pe(phb); + phb->ioda.root_pe_idx = root_pe->pe_number; } INIT_LIST_HEAD(&phb->ioda.pe_list); diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 83d40a06e938..51c254f2f3cb 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -136,7 +136,6 @@ struct pnv_phb { unsigned int total_pe_num; unsigned int reserved_pe_idx; unsigned int root_pe_idx; - bool root_pe_populated; /* 32-bit MMIO window */ unsigned int m32_size; -- cgit v1.2.3-59-g8ed1b From 6ae8aedf8fa932541f48a85219d75ca041c22080 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Fri, 17 Apr 2020 17:35:08 +1000 Subject: powerpc/powernv/pci: Sprinkle around some WARN_ON()s pnv_pci_ioda_configure_bus() should now only ever be called when a device is added to the bus so add a WARN_ON() to the empty bus check. Similarly, pnv_pci_ioda_setup_bus_PE() should only ever be called for an unconfigured PE, so add a WARN_ON() for that case too. Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200417073508.30356-5-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index c4f981cd3cb0..73a63efcf855 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1139,7 +1139,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) * We should reuse it instead of allocating a new one. */ pe_num = phb->ioda.pe_rmap[bus->number << 8]; - if (pe_num != IODA_INVALID_PE) { + if (WARN_ON(pe_num != IODA_INVALID_PE)) { pe = &phb->ioda.pe_array[pe_num]; return NULL; } @@ -3199,7 +3199,7 @@ static void pnv_pci_configure_bus(struct pci_bus *bus) dev_info(&bus->dev, "Configuring PE for bus\n"); /* Don't assign PE to PCI bus, which doesn't have subordinate devices */ - if (list_empty(&bus->devices)) + if (WARN_ON(list_empty(&bus->devices))) return; /* Reserve PEs according to used M64 resources */ -- cgit v1.2.3-59-g8ed1b From b4ac18eead28611ff470d0f47a35c4e0ac080d9c Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Mon, 25 May 2020 16:13:03 +0530 Subject: powerpc/perf/hv-24x7: Fix inconsistent output values when multiple hv-24x7 events run Commit 2b206ee6b0df ("powerpc/perf/hv-24x7: Display change in counter values") changed the driver to print the _change_ in the counter value rather than the raw value for 24x7 counters. In the case of transactions, the event count is set to 0 at the beginning of the transaction.
It also sets the event's prev_count to the raw value at the time of initialization. Because the event count is set to 0, we see some weird behaviour whenever we run multiple 24x7 events at a time. For example: command#: ./perf stat -e "{hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/, hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/}" -C 0 -I 1000 sleep 100 1.000121704 120 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 1.000121704 5 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 2.000357733 8 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 2.000357733 10 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 3.000495215 18,446,744,073,709,551,616 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 3.000495215 18,446,744,073,709,551,616 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 4.000641884 56 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 4.000641884 18,446,744,073,709,551,616 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 5.000791887 18,446,744,073,709,551,616 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ We get these large values when using -I. Because event_count is set to 0, in the interval case the overall event_count does not increase monotonically: the new delta can be smaller than the previous count. When the intervals are printed, the resulting negative value shows up as these large unsigned numbers. This patch removes the code that sets event_count to 0 in 'h_24x7_event_read'. There won't be much impact, as event->hw.prev_count is still set to the raw value at initialization time, so the change value is printed as before. With this patch, on a Power9 platform: command#: ./perf stat -e "{hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/, hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/}" -C 0 -I 1000 sleep 100 1.000117685 93 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 1.000117685 1 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 2.000349331 98 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 2.000349331 2 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 3.000495900 131 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 3.000495900 4 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 4.000645920 204 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 4.000645920 61 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 4.284169997 22 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ Suggested-by: Sukadev Bhattiprolu Signed-off-by: Kajol Jain Tested-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525104308.9814-2-kjain@linux.ibm.com --- arch/powerpc/perf/hv-24x7.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 573e0b309c0c..48e8f4b17b91 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -1400,16 +1400,6 @@ static void h_24x7_event_read(struct perf_event *event) h24x7hw = &get_cpu_var(hv_24x7_hw); h24x7hw->events[i] = event; put_cpu_var(h24x7hw); - /* - * Clear the event count so we can compute the _change_ - * in the 24x7 raw counter value at the end of the txn. - * - * Note that we could alternatively read the 24x7 value - * now and save its value in event->hw.prev_count. But - * that would require issuing a hcall, which would then - * defeat the purpose of using the txn interface.
- */ - local64_set(&event->count, 0); } put_cpu_var(hv_24x7_reqb); -- cgit v1.2.3-59-g8ed1b From 8ba21426738207711347335b2cf3e99c690fc777 Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Mon, 25 May 2020 16:13:04 +0530 Subject: powerpc/hv-24x7: Add rtas call in hv-24x7 driver to get processor details For hv_24x7 socket/chip level events, the specific chip-id for which the data is requested should be added as part of the pmu events. But details like the number of chips per socket in the system are not exposed. The patch implements read_24x7_sys_info() to get system parameter values like the number of sockets, cores per chip and chips per socket. An rtas_call with the token "PROCESSOR_MODULE_INFO" is used to get these values. A subsequent patch exports these values via sysfs. The patch also makes these parameters default to 1. Signed-off-by: Kajol Jain Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525104308.9814-3-kjain@linux.ibm.com --- arch/powerpc/include/asm/rtas.h | 6 ++++ arch/powerpc/perf/hv-24x7.c | 62 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 3c1887351c71..7ef81b3e33f6 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -483,5 +483,11 @@ static inline void rtas_initialize(void) { }; extern int call_rtas(const char *, int, int, unsigned long *, ...); +#ifdef CONFIG_HV_PERF_CTRS +void read_24x7_sys_info(void); +#else +static inline void read_24x7_sys_info(void) { } +#endif + #endif /* __KERNEL__ */ #endif /* _POWERPC_RTAS_H */ diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 48e8f4b17b91..fc16d979c191 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -20,6 +20,7 @@ #include #include +#include #include "hv-24x7.h" #include "hv-24x7-catalog.h" #include "hv-common.h" @@ -57,6 +58,65 @@ static bool is_physical_domain(unsigned domain) } } +/* + * The Processor Module Information system parameter allows transferring + * of certain processor module information from the platform to the OS. + * Refer PAPR+ document to get parameter token value as '43'. + */ + +#define PROCESSOR_MODULE_INFO 43 + +static u32 phys_sockets; /* Physical sockets */ +static u32 phys_chipspersocket; /* Physical chips per socket*/ +static u32 phys_coresperchip; /* Physical cores per chip */ + +/* + * read_24x7_sys_info() + * Retrieve the number of sockets and chips per socket and cores per + * chip details through the get-system-parameter rtas call. + */ +void read_24x7_sys_info(void) +{ + int call_status, len, ntypes; + + spin_lock(&rtas_data_buf_lock); + + /* + * Making system parameter: chips and sockets and cores per chip + * default to 1.
+ */ + phys_sockets = 1; + phys_chipspersocket = 1; + phys_coresperchip = 1; + + call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, + NULL, + PROCESSOR_MODULE_INFO, + __pa(rtas_data_buf), + RTAS_DATA_BUF_SIZE); + + if (call_status != 0) { + pr_err("Error calling get-system-parameter %d\n", + call_status); + } else { + len = be16_to_cpup((__be16 *)&rtas_data_buf[0]); + if (len < 8) + goto out; + + ntypes = be16_to_cpup((__be16 *)&rtas_data_buf[2]); + + if (!ntypes) + goto out; + + phys_sockets = be16_to_cpup((__be16 *)&rtas_data_buf[4]); + phys_chipspersocket = be16_to_cpup((__be16 *)&rtas_data_buf[6]); + phys_coresperchip = be16_to_cpup((__be16 *)&rtas_data_buf[8]); + } + +out: + spin_unlock(&rtas_data_buf_lock); +} + /* Domains for which more than one result element are returned for each event. */ static bool domain_needs_aggregation(unsigned int domain) { @@ -1605,6 +1665,8 @@ static int hv_24x7_init(void) if (r) return r; + read_24x7_sys_info(); + return 0; } -- cgit v1.2.3-59-g8ed1b From 60beb65da1efd4cc23d05141181c39b98487950f Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Mon, 25 May 2020 16:13:05 +0530 Subject: powerpc/hv-24x7: Add sysfs files inside hv-24x7 device to show processor details To expose the system dependent parameter like total number of sockets and numbers of chips per socket, patch adds two sysfs files. "sockets" and "chips" are added to /sys/devices/hv_24x7/interface/ of the "hv_24x7" pmu. Signed-off-by: Kajol Jain Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525104308.9814-4-kjain@linux.ibm.com --- arch/powerpc/perf/hv-24x7.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index fc16d979c191..db213eb7cb02 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -446,6 +446,24 @@ static ssize_t device_show_string(struct device *dev, return sprintf(buf, "%s\n", (char *)d->var); } +static ssize_t sockets_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", phys_sockets); +} + +static ssize_t chipspersocket_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", phys_chipspersocket); +} + +static ssize_t coresperchip_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", phys_coresperchip); +} + static struct attribute *device_str_attr_create_(char *name, char *str) { struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL); @@ -1092,6 +1110,9 @@ PAGE_0_ATTR(catalog_len, "%lld\n", (unsigned long long)be32_to_cpu(page_0->length) * 4096); static BIN_ATTR_RO(catalog, 0/* real length varies */); static DEVICE_ATTR_RO(domains); +static DEVICE_ATTR_RO(sockets); +static DEVICE_ATTR_RO(chipspersocket); +static DEVICE_ATTR_RO(coresperchip); static struct bin_attribute *if_bin_attrs[] = { &bin_attr_catalog, @@ -1102,6 +1123,9 @@ static struct attribute *if_attrs[] = { &dev_attr_catalog_len.attr, &dev_attr_catalog_version.attr, &dev_attr_domains.attr, + &dev_attr_sockets.attr, + &dev_attr_chipspersocket.attr, + &dev_attr_coresperchip.attr, NULL, }; -- cgit v1.2.3-59-g8ed1b From 15cd1d35ba4a59832df693858ef046457107bd8d Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Mon, 25 May 2020 16:13:06 +0530 Subject: Documentation/ABI: Add ABI documentation for chips and sockets Add documentation for the following sysfs files: /sys/devices/hv_24x7/interface/chipspersocket, 
/sys/devices/hv_24x7/interface/sockets, /sys/devices/hv_24x7/interface/coresperchip Signed-off-by: Kajol Jain Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525104308.9814-5-kjain@linux.ibm.com --- .../testing/sysfs-bus-event_source-devices-hv_24x7 | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 index ec27c6c9e737..e8698afcd952 100644 --- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 @@ -22,6 +22,27 @@ Description: Exposes the "version" field of the 24x7 catalog. This is also extractable from the provided binary "catalog" sysfs entry. +What: /sys/devices/hv_24x7/interface/sockets +Date: May 2020 +Contact: Linux on PowerPC Developer List +Description: read only + This sysfs interface exposes the number of sockets present in the + system. + +What: /sys/devices/hv_24x7/interface/chipspersocket +Date: May 2020 +Contact: Linux on PowerPC Developer List +Description: read only + This sysfs interface exposes the number of chips per socket + present in the system. + +What: /sys/devices/hv_24x7/interface/coresperchip +Date: May 2020 +Contact: Linux on PowerPC Developer List +Description: read only + This sysfs interface exposes the number of cores per chip + present in the system. + What: /sys/bus/event_source/devices/hv_24x7/event_descs/ Date: February 2014 Contact: Linux on PowerPC Developer List -- cgit v1.2.3-59-g8ed1b From 373b373053384f12951ae9f916043d955501d482 Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Mon, 25 May 2020 16:13:07 +0530 Subject: powerpc/pseries: Update hv-24x7 information after migration Function 'read_24x7_sys_info()' (added in the earlier patch) gets system parameter values like the number of sockets and chips per socket via an rtas_call with token "PROCESSOR_MODULE_INFO". In case an LPAR migrates from one system to another, system parameters like chips per socket or the number of sockets might change, so they need to be re-initialized; otherwise these values still correspond to the previous system. This patch adds a call to 'read_24x7_sys_info()' from 'post_mobility_fixup()' to re-initialize the phys_sockets, phys_chipspersocket and phys_coresperchip values. Signed-off-by: Kajol Jain Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525104308.9814-6-kjain@linux.ibm.com --- arch/powerpc/platforms/pseries/mobility.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index b571285f6c14..10d982997736 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -371,6 +371,9 @@ void post_mobility_fixup(void) /* Possibly switch to a new RFI flush type */ pseries_setup_rfi_flush(); + /* Reinitialise system information for hv-24x7 */ + read_24x7_sys_info(); + return; } -- cgit v1.2.3-59-g8ed1b From 094235222d41d68d35de18170058d94a96a82628 Mon Sep 17 00:00:00 2001 From: Ram Pai Date: Mon, 24 Feb 2020 01:09:48 -0500 Subject: powerpc/xive: Share the event-queue page with the Hypervisor. The XIVE interrupt controller uses an Event Queue (EQ) to enqueue event notifications when an exception occurs. The EQ is a single memory page provided by the O/S defining a circular buffer, one per server and priority couple. On baremetal, the EQ page is configured with an OPAL call.
On pseries, an extra hop is necessary and the guest OS uses the hcall H_INT_SET_QUEUE_CONFIG to configure the XIVE interrupt controller. The XIVE controller being Hypervisor privileged, it will not be allowed to enqueue event notifications for a Secure VM unless the EQ pages are shared by the Secure VM. Hypervisor/Ultravisor still requires support for the TIMA and ESB page fault handlers. Until this is complete, QEMU can use the emulated XIVE device for Secure VMs, option "kernel_irqchip=off" on the QEMU pseries machine. Signed-off-by: Ram Pai Reviewed-by: Cedric Le Goater Reviewed-by: Greg Kurz Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200426020518.GC5853@oc0525413822.ibm.com --- arch/powerpc/sysdev/xive/spapr.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index 7ab5c6780997..f0551a2be9df 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include "xive-internal.h" @@ -502,6 +504,9 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, rc = -EIO; } else { q->qpage = qpage; + if (is_secure_guest()) + uv_share_page(PHYS_PFN(qpage_phys), + 1 << xive_alloc_order(order)); } fail: return rc; @@ -535,6 +540,8 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, hw_cpu, prio); alloc_order = xive_alloc_order(xive_queue_shift); + if (is_secure_guest()) + uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order); free_pages((unsigned long)q->qpage, alloc_order); q->qpage = NULL; } -- cgit v1.2.3-59-g8ed1b From bf8036a4098d1548cdccf9ed5c523ef4e83e3c68 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 28 May 2020 13:34:56 +0530 Subject: powerpc/book3s64/kvm: Fix secondary page table walk warning during migration This patch fixes the below warning reported during migration: find_kvm_secondary_pte called with kvm mmu_lock not held CPU: 23 PID: 5341 Comm: qemu-system-ppc Tainted: G W 5.7.0-rc5-kvm-00211-g9ccf10d6d088 #432 NIP: c008000000fe848c LR: c008000000fe8488 CTR: 0000000000000000 REGS: c000001e19f077e0 TRAP: 0700 Tainted: G W (5.7.0-rc5-kvm-00211-g9ccf10d6d088) MSR: 9000000000029033 CR: 42222422 XER: 20040000 CFAR: c00000000012f5ac IRQMASK: 0 GPR00: c008000000fe8488 c000001e19f07a70 c008000000ffe200 0000000000000039 GPR04: 0000000000000001 c000001ffc8b4900 0000000000018840 0000000000000007 GPR08: 0000000000000003 0000000000000001 0000000000000007 0000000000000001 GPR12: 0000000000002000 c000001fff6d9400 000000011f884678 00007fff70b70000 GPR16: 00007fff7137cb90 00007fff7dcb4410 0000000000000001 0000000000000000 GPR20: 000000000ffe0000 0000000000000000 0000000000000001 0000000000000000 GPR24: 8000000000000000 0000000000000001 c000001e1f67e600 c000001e1fd82410 GPR28: 0000000000001000 c000001e2e410000 0000000000000fff 0000000000000ffe NIP [c008000000fe848c] kvmppc_hv_get_dirty_log_radix+0x2e4/0x340 [kvm_hv] LR [c008000000fe8488] kvmppc_hv_get_dirty_log_radix+0x2e0/0x340 [kvm_hv] Call Trace: [c000001e19f07a70] [c008000000fe8488] kvmppc_hv_get_dirty_log_radix+0x2e0/0x340 [kvm_hv] (unreliable) [c000001e19f07b50] [c008000000fd42e4] kvm_vm_ioctl_get_dirty_log_hv+0x33c/0x3c0 [kvm_hv] [c000001e19f07be0] [c008000000eea878] kvm_vm_ioctl_get_dirty_log+0x30/0x50 [kvm] [c000001e19f07c00] [c008000000edc818] kvm_vm_ioctl+0x2b0/0xc00 [kvm] [c000001e19f07d50] [c00000000046e148] ksys_ioctl+0xf8/0x150 [c000001e19f07da0] [c00000000046e1c8] sys_ioctl+0x28/0x80 
[c000001e19f07dc0] [c00000000003652c] system_call_exception+0x16c/0x240 [c000001e19f07e20] [c00000000000d070] system_call_common+0xf0/0x278 Instruction dump: 7d3a512a 4200ffd0 7ffefb78 4bfffdc4 60000000 3c820000 e8848468 3c620000 e86384a8 38840010 4800673d e8410018 <0fe00000> 4bfffdd4 60000000 60000000 Reported-by: Paul Mackerras Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200528080456.87797-1-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/kvm_book3s_64.h | 10 +++++++++ arch/powerpc/kvm/book3s_64_mmu_radix.c | 35 ++++++++++++++++++++++++++------ 2 files changed, 39 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index c58e64a0a74f..9bb9bb370b53 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -635,6 +635,16 @@ extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm, unsigned long gpa, unsigned long hpa, unsigned long nbytes); +static inline pte_t * +find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea, + unsigned *hshift) +{ + pte_t *pte; + + pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift); + return pte; +} + static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea, unsigned *hshift) { diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 271f1c3d8443..954fd7a12149 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -1040,7 +1040,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm, { unsigned long gfn = memslot->base_gfn + pagenum; unsigned long gpa = gfn << PAGE_SHIFT; - pte_t *ptep; + pte_t *ptep, pte; unsigned int shift; int ret = 0; unsigned long old, *rmapp; @@ -1048,12 +1048,35 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm, if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) return ret; - ptep = find_kvm_secondary_pte(kvm, gpa, &shift); - if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) { - ret = 1; - if (shift) - ret = 1 << (shift - PAGE_SHIFT); + /* + * For performance reasons we don't hold kvm->mmu_lock while walking the + * partition scoped table. + */ + ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift); + if (!ptep) + return 0; + + pte = READ_ONCE(*ptep); + if (pte_present(pte) && pte_dirty(pte)) { spin_lock(&kvm->mmu_lock); + /* + * Recheck the pte again + */ + if (pte_val(pte) != pte_val(*ptep)) { + /* + * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can + * only find PAGE_SIZE pte entries here. We can continue + * to use the pte addr returned by above page table + * walk. + */ + if (!pte_present(*ptep) || !pte_dirty(*ptep)) { + spin_unlock(&kvm->mmu_lock); + return 0; + } + } + + ret = 1; + VM_BUG_ON(shift); old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0, gpa, shift); kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid); -- cgit v1.2.3-59-g8ed1b From ef3534a94fdbdeab4c89d18d0164be2ad5d6dbb7 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Tue, 2 Jun 2020 09:42:08 +0530 Subject: hw-breakpoints: Fix build warnings with clang kbuild test robot reported some build warnings in the hw_breakpoint code when compiled with clang[1]. Some of them were introduced by the recent powerpc change to add arch_reserve_bp_slot() and arch_release_bp_slot(). Fix them all. 
kernel/events/hw_breakpoint.c:71:12: warning: no previous prototype for function 'hw_breakpoint_weight' kernel/events/hw_breakpoint.c:216:12: warning: no previous prototype for function 'arch_reserve_bp_slot' kernel/events/hw_breakpoint.c:221:13: warning: no previous prototype for function 'arch_release_bp_slot' kernel/events/hw_breakpoint.c:228:13: warning: no previous prototype for function 'arch_unregister_hw_breakpoint' [1]: https://lore.kernel.org/linuxppc-dev/202005192233.oi9CjRtA%25lkp@intel.com/ Fixes: 29da4f91c0c1 ("powerpc/watchpoint: Don't allow concurrent perf and ptrace events") Reported-by: kbuild test robot Signed-off-by: Ravi Bangoria [mpe: Drop extern, flesh out change log, add Fixes tag] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200602041208.128913-1-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/hw_breakpoint.h | 3 --- include/linux/hw_breakpoint.h | 4 ++++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index f42a55eb77d2..cb424799da0d 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -70,9 +70,6 @@ extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); int arch_install_hw_breakpoint(struct perf_event *bp); void arch_uninstall_hw_breakpoint(struct perf_event *bp); -int arch_reserve_bp_slot(struct perf_event *bp); -void arch_release_bp_slot(struct perf_event *bp); -void arch_unregister_hw_breakpoint(struct perf_event *bp); void hw_breakpoint_pmu_read(struct perf_event *bp); extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 6058c3844a76..d7d4250cd1e4 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -80,6 +80,10 @@ extern int dbg_reserve_bp_slot(struct perf_event *bp); extern int dbg_release_bp_slot(struct perf_event *bp); extern int reserve_bp_slot(struct perf_event *bp); extern void release_bp_slot(struct perf_event *bp); +int hw_breakpoint_weight(struct perf_event *bp); +int arch_reserve_bp_slot(struct perf_event *bp); +void arch_release_bp_slot(struct perf_event *bp); +void arch_unregister_hw_breakpoint(struct perf_event *bp); extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); -- cgit v1.2.3-59-g8ed1b From be5470e0c285a68dc3afdea965032f5ddc8269d7 Mon Sep 17 00:00:00 2001 From: Pingfan Liu Date: Wed, 1 Apr 2020 22:00:44 +0800 Subject: powerpc/crashkernel: Take "mem=" option into account The "mem=" option is an easy way to put high pressure on memory during testing. Hence after applying the memory limit, instead of the total memory, the actual usable memory should be considered when reserving memory for crashkernel. Otherwise boot-up may run into OOM issues. E.g. it would reserve 4G prior to the change and 512M afterward, if passing crashkernel="2G-4G:384M,4G-16G:512M,16G-64G:1G,64G-128G:2G,128G-:4G", and mem=5G on a 256G machine. This issue is powerpc specific because it puts higher priority on fadump and kdump reservation than on "mem=". Refer to the following code: if (fadump_reserve_mem() == 0) reserve_crashkernel(); ... /* Ensure that total memory size is page-aligned.
*/ limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE); memblock_enforce_memory_limit(limit); While on other arches, the effect of "mem=" takes higher priority and passes through memblock_phys_mem_size() before reserve_crashkernel() is called. Signed-off-by: Pingfan Liu Reviewed-by: Hari Bathini Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1585749644-4148-1-git-send-email-kernelfans@gmail.com --- arch/powerpc/kexec/core.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c index 078fe3d76feb..56da5eb2b923 100644 --- a/arch/powerpc/kexec/core.c +++ b/arch/powerpc/kexec/core.c @@ -115,11 +115,12 @@ void machine_kexec(struct kimage *image) void __init reserve_crashkernel(void) { - unsigned long long crash_size, crash_base; + unsigned long long crash_size, crash_base, total_mem_sz; int ret; + total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size(); /* use common parsing */ - ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), + ret = parse_crashkernel(boot_command_line, total_mem_sz, &crash_size, &crash_base); if (ret == 0 && crash_size > 0) { crashk_res.start = crash_base; @@ -178,6 +179,7 @@ void __init reserve_crashkernel(void) /* Crash kernel trumps memory limit */ if (memory_limit && memory_limit <= crashk_res.end) { memory_limit = crashk_res.end + 1; + total_mem_sz = memory_limit; printk("Adjusted memory limit for crashkernel, now 0x%llx\n", memory_limit); } @@ -186,7 +188,7 @@ void __init reserve_crashkernel(void) "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20), (unsigned long)(crashk_res.start >> 20), - (unsigned long)(memblock_phys_mem_size() >> 20)); + (unsigned long)(total_mem_sz >> 20)); if (!memblock_is_region_memory(crashk_res.start, crash_size) || memblock_reserve(crashk_res.start, crash_size)) { -- cgit v1.2.3-59-g8ed1b From 9a2921e5baca1d25eb8d21f21d1e90581a6d0f68 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 27 May 2020 15:14:35 +0530 Subject: powerpc/fadump: Account for memory_limit while reserving memory If the memory chunk found for reserving memory overshoots the memory limit imposed, do not proceed with reserving memory. This was the default behavior until commit 140777a3d8df ("powerpc/fadump: consider reserved ranges while reserving memory") changed it unwittingly.
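As a concrete illustration (numbers invented for this example): with mem_boundary at 4G and a 512M reservation to place, a chunk found at base 3.8G fails the new check because base + size lands at roughly 4.3G, overshooting mem_boundary, so the reservation is refused instead of silently crossing the limit.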
Fixes: 140777a3d8df ("powerpc/fadump: consider reserved ranges while reserving memory") Cc: stable@vger.kernel.org Reported-by: kbuild test robot Signed-off-by: Hari Bathini Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159057266320.22331.6571453892066907320.stgit@hbathini.in.ibm.com --- arch/powerpc/kernel/fadump.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 63aac8b5f233..78ab9a6ee6ac 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -603,7 +603,7 @@ int __init fadump_reserve_mem(void) */ base = fadump_locate_reserve_mem(base, size); - if (!base) { + if (!base || (base + size > mem_boundary)) { pr_err("Failed to find memory chunk for reservation!\n"); goto error_out; } -- cgit v1.2.3-59-g8ed1b From 82a7cebdd95cffa55449d6c1d97cc9b743a66056 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Fri, 29 May 2020 09:07:31 +1000 Subject: powerpc: Fix misleading small cores print Currently when we boot on a big core system, we get this print: [ 0.040500] Using small cores at SMT level This is misleading as we've actually detected big cores. This patch clears up the print to say we've detected big cores but are using small cores for scheduling. Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200528230731.1235752-1-mikey@neuling.org --- arch/powerpc/kernel/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 6d2a3a3666f0..c820c95162ff 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -1383,7 +1383,7 @@ void __init smp_cpus_done(unsigned int max_cpus) #ifdef CONFIG_SCHED_SMT if (has_big_cores) { - pr_info("Using small cores at SMT level\n"); + pr_info("Big cores detected but using small core scheduling\n"); power9_topology[0].mask = smallcore_smt_mask; powerpc_topology[0].mask = smallcore_smt_mask; } -- cgit v1.2.3-59-g8ed1b From 598c01b5b2fca3a9de8ad3400edbff98ec22f0b2 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 20 May 2020 22:12:57 +1000 Subject: powerpc/configs/64s: Enable CONFIG_PRINTK_CALLER This adds the CPU or thread number to printk messages. This helps a lot when deciphering concurrent oopses that have been interleaved.
Example output of PID 1 (T1) triggering a warning: [ 1.581678][ T1] WARNING: CPU: 0 PID: 1 at crypto/rsa-pkcs1pad.c:539 pkcs1pad_verify+0x38/0x140 [ 1.581681][ T1] Modules linked in: [ 1.581693][ T1] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.5.0-rc5-gcc-8.2.0-00121-gf84c2e595927-dirty #1515 [ 1.581700][ T1] NIP: c000000000207d64 LR: c000000000207d3c CTR: c000000000207d2c [ 1.581708][ T1] REGS: c0000000fd2e7560 TRAP: 0700 Not tainted (5.5.0-rc5-gcc-8.2.0-00121-gf84c2e595927-dirty) [ 1.581712][ T1] MSR: 9000000000029033 CR: 44000222 XER: 00040000 Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200520121257.961112-1-mpe@ellerman.id.au --- arch/powerpc/configs/powernv_defconfig | 1 + arch/powerpc/configs/ppc64_defconfig | 1 + arch/powerpc/configs/pseries_defconfig | 1 + 3 files changed, 3 insertions(+) diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index df8bdbaa5d8f..2de9aadf0f50 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -347,3 +347,4 @@ CONFIG_KVM_BOOK3S_64=m CONFIG_KVM_BOOK3S_64_HV=m CONFIG_VHOST_NET=m CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index bae8170d7401..57142a648ebd 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -358,6 +358,7 @@ CONFIG_CRYPTO_DEV_NX=y CONFIG_CRYPTO_DEV_NX_ENCRYPT=m CONFIG_CRYPTO_DEV_VMX=y CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_STACK_USAGE=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 0bea4d3ffb85..dfa4a726333b 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -322,3 +322,4 @@ CONFIG_KVM_BOOK3S_64=m CONFIG_KVM_BOOK3S_64_HV=m CONFIG_VHOST_NET=m CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y -- cgit v1.2.3-59-g8ed1b From 0e7e92efe11bc5993def689e10f7bcb36f127651 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 20 May 2020 21:17:40 +1000 Subject: powerpc/xmon: Show task->thread.regs in process display Show the address of the task's regs in the process listing in xmon. The regs should always be on the stack page that we also print the address of, but it's still helpful not to have to find them by hand. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200520111740.953679-1-mpe@ellerman.id.au --- arch/powerpc/xmon/xmon.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 16ee6639a60c..b34d7034526e 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -3185,8 +3185,8 @@ static void show_task(struct task_struct *tsk) (tsk->exit_state & EXIT_DEAD) ? 'E' : (tsk->state & TASK_INTERRUPTIBLE) ?
'S' : '?'; - printf("%px %016lx %6d %6d %c %2d %s\n", tsk, - tsk->thread.ksp, + printf("%16px %16lx %16px %6d %6d %c %2d %s\n", tsk, + tsk->thread.ksp, tsk->thread.regs, tsk->pid, rcu_dereference(tsk->parent)->pid, state, task_cpu(tsk), tsk->comm); @@ -3309,7 +3309,7 @@ static void show_tasks(void) unsigned long tskv; struct task_struct *tsk = NULL; - printf(" task_struct ->thread.ksp PID PPID S P CMD\n"); + printf(" task_struct ->thread.ksp ->thread.regs PID PPID S P CMD\n"); if (scanhex(&tskv)) tsk = (struct task_struct *)tskv; -- cgit v1.2.3-59-g8ed1b From b6eca183e23e7a6625a0d2cdb806b7cd1abcd2d2 Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Thu, 2 Apr 2020 16:51:57 -0300 Subject: powerpc/kernel: Enable memory hot-remove after reboot on pseries guests When providing guests, it's desirable to resize their memory on demand. Currently, it's possible to do so by creating a guest with a small base memory, hot-plugging all the rest, and using the 'movable_node' kernel command-line parameter, which puts all hot-plugged memory in ZONE_MOVABLE, allowing it to be removed whenever needed. But there is an issue regarding guest reboot: if memory is hot-plugged and the guest is then rebooted, all hot-plugged memory goes to ZONE_NORMAL, which offers no hot-removal guarantee, and this usually prevents the memory from being hot-removed from the guest. It's possible to use device-tree information to fix that behavior, as it stores flags for LMB ranges in ibm,dynamic-memory-vN. It involves marking each memblock with the correct flags as hotpluggable memory, which mm/memblock.c puts in ZONE_MOVABLE during boot if 'movable_node' is passed. For carrying such information, the new flag DRCONF_MEM_HOTREMOVABLE was proposed and accepted into the Power Architecture documentation. This flag should be: - true (b=1) if the hypervisor may want to hot-remove it later, and - false (b=0) if it does not care. During boot, the guest kernel reads the device tree and early_init_drmem_lmb() is called for each added LMB. Checking for this new flag there and marking the memblocks as hotpluggable memory is enough to get the desired behavior, as sketched below. This causes no change if the 'movable_node' parameter is not passed on the kernel command line.
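To illustrate the flag check (a sketch only, not part of the patch; the helper name is invented, while struct drmem_lmb, its flags field and the DRCONF_* bits come from asm/drmem.h as patched below):

#include <asm/drmem.h>

/* Sketch: an LMB carrying the new flag is one the hypervisor may want
 * to reclaim later, so early boot should mark it hotpluggable. */
static bool lmb_is_hot_removable(const struct drmem_lmb *lmb)
{
	return lmb->flags & DRCONF_MEM_HOTREMOVABLE;
}

Ranges for which this returns true are exactly the ones that get memblock_mark_hotplug() applied in the hunk below.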
Signed-off-by: Leonardo Bras Reviewed-by: Bharata B Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200402195156.626430-1-leonardo@linux.ibm.com --- arch/powerpc/include/asm/drmem.h | 1 + arch/powerpc/kernel/prom.c | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h index 28c3d936fdf3..414d209f45bb 100644 --- a/arch/powerpc/include/asm/drmem.h +++ b/arch/powerpc/include/asm/drmem.h @@ -65,6 +65,7 @@ struct of_drconf_cell_v2 { #define DRCONF_MEM_ASSIGNED 0x00000008 #define DRCONF_MEM_AI_INVALID 0x00000040 #define DRCONF_MEM_RESERVED 0x00000080 +#define DRCONF_MEM_HOTREMOVABLE 0x00000100 static inline u32 drmem_lmb_size(void) { diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 1dcf0e214a22..9a651366d385 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -515,9 +515,14 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb, size = 0x80000000ul - base; } + if (!validate_mem_limit(base, &size)) + continue; + DBG("Adding: %llx -> %llx\n", base, size); - if (validate_mem_limit(base, &size)) - memblock_add(base, size); + memblock_add(base, size); + + if (lmb->flags & DRCONF_MEM_HOTREMOVABLE) + memblock_mark_hotplug(base, size); } while (--rngs); } #endif /* CONFIG_PPC_PSERIES */ -- cgit v1.2.3-59-g8ed1b From af2876b501e42c3fb5174cac9dd02598436f0fdf Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Tue, 12 May 2020 18:45:35 -0300 Subject: powerpc/crash: Use NMI context for printk when starting to crash Currently, if the printk lock (logbuf_lock) is held by another thread during a crash, there is a chance of deadlocking the crash on the next printk, blocking a possibly desired kdump. At the start of default_machine_crash_shutdown(), make printk enter NMI context: it will then use per-cpu buffers to store the message and avoid taking logbuf_lock. Suggested-by: Michael Ellerman Signed-off-by: Leonardo Bras Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200512214533.93878-1-leobras.c@gmail.com --- arch/powerpc/kexec/crash.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c index d488311efab1..c9a889880214 100644 --- a/arch/powerpc/kexec/crash.c +++ b/arch/powerpc/kexec/crash.c @@ -311,6 +311,9 @@ void default_machine_crash_shutdown(struct pt_regs *regs) unsigned int i; int (*old_handler)(struct pt_regs *regs); + /* Avoid hardlocking with irresponsive CPU holding logbuf_lock */ + printk_nmi_enter(); + /* * This function is only called after the system * has panicked or is otherwise in a critical state. -- cgit v1.2.3-59-g8ed1b From 783a015b747f606e803b798eb8b50c73c548691d Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Mon, 18 May 2020 20:42:44 -0300 Subject: powerpc/rtas: Move type/struct definitions from rtas.h into rtas-types.h Including rtas.h just to get an rtas* struct into other headers can cause a lot of errors, because of the include dependencies needed by its inline functions. Create rtas-types.h, move all type/struct definitions there from rtas.h, then include rtas-types.h into rtas.h. Also, as suggested by checkpatch.pl, replace uint8_t with u8.
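As a hedged sketch of the payoff (the struct below is hypothetical; only the include line reflects the patch):

#include <asm/rtas-types.h>	/* type definitions only, no inline helpers */

/* A header can now embed RTAS types without pulling in rtas.h
 * and the include dependencies its inline functions need. */
struct foo_percpu {		/* hypothetical example struct */
	struct rtas_args *rtas_buf;
};

The next patch relies on exactly this, embedding a struct rtas_args pointer in struct paca_struct.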
Signed-off-by: Leonardo Bras Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200518234245.200672-2-leobras.c@gmail.com --- arch/powerpc/include/asm/rtas-types.h | 124 ++++++++++++++++++++++++++++++++++ arch/powerpc/include/asm/rtas.h | 118 +------------------------------- 2 files changed, 125 insertions(+), 117 deletions(-) create mode 100644 arch/powerpc/include/asm/rtas-types.h diff --git a/arch/powerpc/include/asm/rtas-types.h b/arch/powerpc/include/asm/rtas-types.h new file mode 100644 index 000000000000..aa420561bc10 --- /dev/null +++ b/arch/powerpc/include/asm/rtas-types.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_POWERPC_RTAS_TYPES_H +#define _ASM_POWERPC_RTAS_TYPES_H + +#include + +typedef __be32 rtas_arg_t; + +struct rtas_args { + __be32 token; + __be32 nargs; + __be32 nret; + rtas_arg_t args[16]; + rtas_arg_t *rets; /* Pointer to return values in args[]. */ +}; + +struct rtas_t { + unsigned long entry; /* physical address pointer */ + unsigned long base; /* physical address pointer */ + unsigned long size; + arch_spinlock_t lock; + struct rtas_args args; + struct device_node *dev; /* virtual address pointer */ +}; + +struct rtas_suspend_me_data { + atomic_t working; /* number of cpus accessing this struct */ + atomic_t done; + int token; /* ibm,suspend-me */ + atomic_t error; + struct completion *complete; /* wait on this until working == 0 */ +}; + +struct rtas_error_log { + /* Byte 0 */ + u8 byte0; /* Architectural version */ + + /* Byte 1 */ + u8 byte1; + /* XXXXXXXX + * XXX 3: Severity level of error + * XX 2: Degree of recovery + * X 1: Extended log present? + * XX 2: Reserved + */ + + /* Byte 2 */ + u8 byte2; + /* XXXXXXXX + * XXXX 4: Initiator of event + * XXXX 4: Target of failed operation + */ + u8 byte3; /* General event or error*/ + __be32 extended_log_length; /* length in bytes */ + unsigned char buffer[1]; /* Start of extended log */ + /* Variable length. */ +}; + +/* RTAS general extended event log, Version 6. The extended log starts + * from "buffer" field of struct rtas_error_log defined above. + */ +struct rtas_ext_event_log_v6 { + /* Byte 0 */ + u8 byte0; + /* XXXXXXXX + * X 1: Log valid + * X 1: Unrecoverable error + * X 1: Recoverable (correctable or successfully retried) + * X 1: Bypassed unrecoverable error (degraded operation) + * X 1: Predictive error + * X 1: "New" log (always 1 for data returned from RTAS) + * X 1: Big Endian + * X 1: Reserved + */ + + /* Byte 1 */ + u8 byte1; /* reserved */ + + /* Byte 2 */ + u8 byte2; + /* XXXXXXXX + * X 1: Set to 1 (indicating log is in PowerPC format) + * XXX 3: Reserved + * XXXX 4: Log format used for bytes 12-2047 + */ + + /* Byte 3 */ + u8 byte3; /* reserved */ + /* Byte 4-11 */ + u8 reserved[8]; /* reserved */ + /* Byte 12-15 */ + __be32 company_id; /* Company ID of the company */ + /* that defines the format for */ + /* the vendor specific log type */ + /* Byte 16-end of log */ + u8 vendor_log[1]; /* Start of vendor specific log */ + /* Variable length. 
*/ +}; + +/* Vendor specific Platform Event Log Format, Version 6, section header */ +struct pseries_errorlog { + __be16 id; /* 0x00 2-byte ASCII section ID */ + __be16 length; /* 0x02 Section length in bytes */ + u8 version; /* 0x04 Section version */ + u8 subtype; /* 0x05 Section subtype */ + __be16 creator_component; /* 0x06 Creator component ID */ + u8 data[]; /* 0x08 Start of section data */ +}; + +/* RTAS pseries hotplug errorlog section */ +struct pseries_hp_errorlog { + u8 resource; + u8 action; + u8 id_type; + u8 reserved; + union { + __be32 drc_index; + __be32 drc_count; + struct { __be32 count, index; } ic; + char drc_name[1]; + } _drc_u; +}; + +#endif /* _ASM_POWERPC_RTAS_TYPES_H */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 7ef81b3e33f6..977e32666015 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -5,6 +5,7 @@ #include #include +#include #include #include @@ -42,33 +43,6 @@ * */ -typedef __be32 rtas_arg_t; - -struct rtas_args { - __be32 token; - __be32 nargs; - __be32 nret; - rtas_arg_t args[16]; - rtas_arg_t *rets; /* Pointer to return values in args[]. */ -}; - -struct rtas_t { - unsigned long entry; /* physical address pointer */ - unsigned long base; /* physical address pointer */ - unsigned long size; - arch_spinlock_t lock; - struct rtas_args args; - struct device_node *dev; /* virtual address pointer */ -}; - -struct rtas_suspend_me_data { - atomic_t working; /* number of cpus accessing this struct */ - atomic_t done; - int token; /* ibm,suspend-me */ - atomic_t error; - struct completion *complete; /* wait on this until working == 0 */ -}; - /* RTAS event classes */ #define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */ #define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */ @@ -148,31 +122,6 @@ struct rtas_suspend_me_data { /* RTAS check-exception vector offset */ #define RTAS_VECTOR_EXTERNAL_INTERRUPT 0x500 -struct rtas_error_log { - /* Byte 0 */ - uint8_t byte0; /* Architectural version */ - - /* Byte 1 */ - uint8_t byte1; - /* XXXXXXXX - * XXX 3: Severity level of error - * XX 2: Degree of recovery - * X 1: Extended log present? - * XX 2: Reserved - */ - - /* Byte 2 */ - uint8_t byte2; - /* XXXXXXXX - * XXXX 4: Initiator of event - * XXXX 4: Target of failed operation - */ - uint8_t byte3; /* General event or error*/ - __be32 extended_log_length; /* length in bytes */ - unsigned char buffer[1]; /* Start of extended log */ - /* Variable length. */ -}; - static inline uint8_t rtas_error_severity(const struct rtas_error_log *elog) { return (elog->byte1 & 0xE0) >> 5; @@ -212,47 +161,6 @@ uint32_t rtas_error_extended_log_length(const struct rtas_error_log *elog) #define RTAS_V6EXT_COMPANY_ID_IBM (('I' << 24) | ('B' << 16) | ('M' << 8)) -/* RTAS general extended event log, Version 6. The extended log starts - * from "buffer" field of struct rtas_error_log defined above. 
- */ -struct rtas_ext_event_log_v6 { - /* Byte 0 */ - uint8_t byte0; - /* XXXXXXXX - * X 1: Log valid - * X 1: Unrecoverable error - * X 1: Recoverable (correctable or successfully retried) - * X 1: Bypassed unrecoverable error (degraded operation) - * X 1: Predictive error - * X 1: "New" log (always 1 for data returned from RTAS) - * X 1: Big Endian - * X 1: Reserved - */ - - /* Byte 1 */ - uint8_t byte1; /* reserved */ - - /* Byte 2 */ - uint8_t byte2; - /* XXXXXXXX - * X 1: Set to 1 (indicating log is in PowerPC format) - * XXX 3: Reserved - * XXXX 4: Log format used for bytes 12-2047 - */ - - /* Byte 3 */ - uint8_t byte3; /* reserved */ - /* Byte 4-11 */ - uint8_t reserved[8]; /* reserved */ - /* Byte 12-15 */ - __be32 company_id; /* Company ID of the company */ - /* that defines the format for */ - /* the vendor specific log type */ - /* Byte 16-end of log */ - uint8_t vendor_log[1]; /* Start of vendor specific log */ - /* Variable length. */ -}; - static inline uint8_t rtas_ext_event_log_format(struct rtas_ext_event_log_v6 *ext_log) { @@ -287,16 +195,6 @@ inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log) #define PSERIES_ELOG_SECT_ID_HOTPLUG (('H' << 8) | 'P') #define PSERIES_ELOG_SECT_ID_MCE (('M' << 8) | 'C') -/* Vendor specific Platform Event Log Format, Version 6, section header */ -struct pseries_errorlog { - __be16 id; /* 0x00 2-byte ASCII section ID */ - __be16 length; /* 0x02 Section length in bytes */ - uint8_t version; /* 0x04 Section version */ - uint8_t subtype; /* 0x05 Section subtype */ - __be16 creator_component; /* 0x06 Creator component ID */ - uint8_t data[]; /* 0x08 Start of section data */ -}; - static inline uint16_t pseries_errorlog_id(struct pseries_errorlog *sect) { @@ -309,20 +207,6 @@ inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect) { return be16_to_cpu(sect->length); } -/* RTAS pseries hotplug errorlog section */ -struct pseries_hp_errorlog { - u8 resource; - u8 action; - u8 id_type; - u8 reserved; - union { - __be32 drc_index; - __be32 drc_count; - struct { __be32 count, index; } ic; - char drc_name[1]; - } _drc_u; -}; - #define PSERIES_HP_ELOG_RESOURCE_CPU 1 #define PSERIES_HP_ELOG_RESOURCE_MEM 2 #define PSERIES_HP_ELOG_RESOURCE_SLOT 3 -- cgit v1.2.3-59-g8ed1b From b664db8e3f976d9233cc9ea5e3f8a8c0bcabeb48 Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Mon, 18 May 2020 20:42:45 -0300 Subject: powerpc/rtas: Implement reentrant rtas call Implement rtas_call_reentrant() for reentrant rtas-calls: "ibm,int-on", "ibm,int-off", "ibm,get-xive" and "ibm,set-xive". On LoPAPR Version 1.1 (March 24, 2016), from 7.3.10.1 to 7.3.10.4, items 2 and 3 say: 2 - For the PowerPC External Interrupt option: The * call must be reentrant to the number of processors on the platform. 3 - For the PowerPC External Interrupt option: The * argument call buffer for each simultaneous call must be physically unique. So, these rtas-calls can be made in a lockless way by using a different buffer for each CPU doing such a call. For this, it was suggested to add the buffer (struct rtas_args) to the PACA struct, so each CPU can have its own buffer. The PACA struct receives a pointer to the rtas buffer, which is allocated in the memory range available to 32-bit RTAS. Reentrant rtas calls are useful to avoid deadlocks while crashing, where rtas calls are needed but some other thread crashed while holding rtas.lock.
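As a minimal sketch of a reentrant call site (mirroring the ics-rtas.c conversion in this patch; ibm_int_off is the token resolved at init time):

	int call_status;

	/* Uses the per-CPU PACA buffer and takes no rtas.lock, so this
	 * is safe even on a crash path. */
	call_status = rtas_call_reentrant(ibm_int_off, 1, 1, NULL, hw_irq);
	if (call_status != 0)
		printk(KERN_ERR "ibm,int-off irq=%u returned %d\n",
		       hw_irq, call_status);
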
This is a backtrace of a deadlock from a kdump testing environment: #0 arch_spin_lock #1 lock_rtas () #2 rtas_call (token=8204, nargs=1, nret=1, outputs=0x0) #3 ics_rtas_mask_real_irq (hw_irq=4100) #4 machine_kexec_mask_interrupts #5 default_machine_crash_shutdown #6 machine_crash_shutdown #7 __crash_kexec #8 crash_kexec #9 oops_end Signed-off-by: Leonardo Bras [mpe: Move under #ifdef PSERIES to avoid build breakage] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200518234245.200672-3-leobras.c@gmail.com --- arch/powerpc/include/asm/paca.h | 2 ++ arch/powerpc/include/asm/rtas.h | 1 + arch/powerpc/kernel/paca.c | 32 +++++++++++++++++++++++ arch/powerpc/kernel/rtas.c | 52 +++++++++++++++++++++++++++++++++++++ arch/powerpc/sysdev/xics/ics-rtas.c | 22 ++++++++-------- 5 files changed, 98 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index e3cc9eb9204d..45a839a7c6cf 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -29,6 +29,7 @@ #include #include #include +#include #include @@ -256,6 +257,7 @@ struct paca_struct { u64 l1d_flush_size; #endif #ifdef CONFIG_PPC_PSERIES + struct rtas_args *rtas_args_reentrant; u8 *mce_data_buf; /* buffer to hold per cpu rtas errlog */ #endif /* CONFIG_PPC_PSERIES */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 977e32666015..014968f25f7e 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -236,6 +236,7 @@ extern struct rtas_t rtas; extern int rtas_token(const char *service); extern int rtas_service_present(const char *service); extern int rtas_call(int token, int, int, int *, ...); +int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...); void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...); extern void __noreturn rtas_restart(char *cmd); diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 3f91ccaa9c74..8d96169c597e 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "setup.h" @@ -164,6 +165,30 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit) #endif /* CONFIG_PPC_BOOK3S_64 */ +#ifdef CONFIG_PPC_PSERIES +/** + * new_rtas_args() - Allocates rtas args + * @cpu: CPU number + * @limit: Memory limit for this allocation + * + * Allocates a struct rtas_args and return it's pointer, + * if not in Hypervisor mode + * + * Return: Pointer to allocated rtas_args + * NULL if CPU in Hypervisor Mode + */ +static struct rtas_args * __init new_rtas_args(int cpu, unsigned long limit) +{ + limit = min_t(unsigned long, limit, RTAS_INSTANTIATE_MAX); + + if (early_cpu_has_feature(CPU_FTR_HVMODE)) + return NULL; + + return alloc_paca_data(sizeof(struct rtas_args), L1_CACHE_BYTES, + limit, cpu); +} +#endif /* CONFIG_PPC_PSERIES */ + /* The Paca is an array with one entry per processor. Each contains an * lppaca, which contains the information shared between the * hypervisor and Linux. 
@@ -202,6 +227,10 @@ void __init __nostackprotector initialise_paca(struct paca_struct *new_paca, int /* For now -- if we have threads this will be adjusted later */ new_paca->tcd_ptr = &new_paca->tcd; #endif + +#ifdef CONFIG_PPC_PSERIES + new_paca->rtas_args_reentrant = NULL; +#endif } /* Put the paca pointer into r13 and SPRG_PACA */ @@ -273,6 +302,9 @@ void __init allocate_paca(int cpu) #endif #ifdef CONFIG_PPC_BOOK3S_64 paca->slb_shadow_ptr = new_slb_shadow(cpu, limit); +#endif +#ifdef CONFIG_PPC_PSERIES + paca->rtas_args_reentrant = new_rtas_args(cpu, limit); #endif paca_struct_size += sizeof(struct paca_struct); } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index c5fa251b8950..a09eba03f180 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -41,6 +41,7 @@ #include #include #include +#include /* This is here deliberately so it's only used in this file */ void enter_rtas(unsigned long); @@ -1014,6 +1015,57 @@ out: free_cpumask_var(offline_mask); return atomic_read(&data.error); } + +/** + * rtas_call_reentrant() - Used for reentrant rtas calls + * @token: Token for desired reentrant RTAS call + * @nargs: Number of Input Parameters + * @nret: Number of Output Parameters + * @outputs: Array of outputs + * @...: Inputs for desired RTAS call + * + * According to LoPAR documentation, only "ibm,int-on", "ibm,int-off", + * "ibm,get-xive" and "ibm,set-xive" are currently reentrant. + * Reentrant calls need their own rtas_args buffer, so not using rtas.args, but + * PACA one instead. + * + * Return: -1 on error, + * First output value of RTAS call if (nret > 0), + * 0 otherwise, + */ +int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...) +{ + va_list list; + struct rtas_args *args; + unsigned long flags; + int i, ret = 0; + + if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) + return -1; + + local_irq_save(flags); + preempt_disable(); + + /* We use the per-cpu (PACA) rtas args buffer */ + args = local_paca->rtas_args_reentrant; + + va_start(list, outputs); + va_rtas_call_unlocked(args, token, nargs, nret, list); + va_end(list); + + if (nret > 1 && outputs) + for (i = 0; i < nret - 1; ++i) + outputs[i] = be32_to_cpu(args->rets[i + 1]); + + if (nret > 0) + ret = be32_to_cpu(args->rets[0]); + + local_irq_restore(flags); + preempt_enable(); + + return ret; +} + #else /* CONFIG_PPC_PSERIES */ int rtas_ibm_suspend_me(u64 handle) { diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c index 6aabc74688a6..4cf18000f07c 100644 --- a/arch/powerpc/sysdev/xics/ics-rtas.c +++ b/arch/powerpc/sysdev/xics/ics-rtas.c @@ -50,8 +50,8 @@ static void ics_rtas_unmask_irq(struct irq_data *d) server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0); - call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, - DEFAULT_PRIORITY); + call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq, + server, DEFAULT_PRIORITY); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive irq %u server %x returned %d\n", @@ -60,7 +60,7 @@ static void ics_rtas_unmask_irq(struct irq_data *d) } /* Now unmask the interrupt (often a no-op) */ - call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq); + call_status = rtas_call_reentrant(ibm_int_on, 1, 1, NULL, hw_irq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", __func__, hw_irq, call_status); @@ -91,7 +91,7 @@ static void ics_rtas_mask_real_irq(unsigned int hw_irq) if (hw_irq == XICS_IPI) return; - call_status = 
rtas_call(ibm_int_off, 1, 1, NULL, hw_irq); + call_status = rtas_call_reentrant(ibm_int_off, 1, 1, NULL, hw_irq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", __func__, hw_irq, call_status); @@ -99,8 +99,8 @@ static void ics_rtas_mask_real_irq(unsigned int hw_irq) } /* Have to set XIVE to 0xff to be able to remove a slot */ - call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, - xics_default_server, 0xff); + call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq, + xics_default_server, 0xff); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", __func__, hw_irq, call_status); @@ -131,7 +131,7 @@ static int ics_rtas_set_affinity(struct irq_data *d, if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) return -1; - status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq); + status = rtas_call_reentrant(ibm_get_xive, 1, 3, xics_status, hw_irq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", @@ -146,8 +146,8 @@ static int ics_rtas_set_affinity(struct irq_data *d, return -1; } - status = rtas_call(ibm_set_xive, 3, 1, NULL, - hw_irq, irq_server, xics_status[1]); + status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, + hw_irq, irq_server, xics_status[1]); if (status) { printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", @@ -179,7 +179,7 @@ static int ics_rtas_map(struct ics *ics, unsigned int virq) return -EINVAL; /* Check if RTAS knows about this interrupt */ - rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq); + rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, hw_irq); if (rc) return -ENXIO; @@ -198,7 +198,7 @@ static long ics_rtas_get_server(struct ics *ics, unsigned long vec) { int rc, status[2]; - rc = rtas_call(ibm_get_xive, 1, 3, status, vec); + rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, vec); if (rc) return -1; return status[0]; -- cgit v1.2.3-59-g8ed1b From 08b1add150a8863665676d0ac9c3ad2d34b2540c Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 19 May 2020 14:30:09 +1000 Subject: powerpc/configs: Add LIBNVDIMM to ppc64_defconfig This gives us OF_PMEM which is useful in mambo. This adds 153K to the text of ppc64le_defconfig, which is 0.8% of the total text.

LIBNVDIMM     text     data     bss       dec      hex
Without   18574833  5518150  1539240  25632223  1871ddf
With      18727834  5546206  1539368  25813408  189e1a0

Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200519043009.3081885-1-mikey@neuling.org --- arch/powerpc/configs/ppc64_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 57142a648ebd..8d7e3e98856d 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -281,6 +281,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m +CONFIG_LIBNVDIMM=y CONFIG_RAS=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y -- cgit v1.2.3-59-g8ed1b From 9ed5df69b79a22b40b20bc2132ba2495708b19c4 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 24 Feb 2020 18:02:10 +0000 Subject: powerpc/kprobes: Use probe_kernel_address() to read instructions In order to avoid Oopses, use probe_kernel_address() to read the instruction at the address where the trap happened.
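The pattern, as a minimal sketch (taken from the hunk below; probe_kernel_address() performs a fixup-protected copy and returns non-zero on fault instead of Oopsing):

	unsigned int instr;

	/* was: instr = *addr; -- dereferencing an unmapped address Oopses */
	if (probe_kernel_address(addr, instr))
		goto no_kprobe;	/* fault reading the instruction, bail out */
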
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/7f24b5961a6839ff01df792816807f74ff236bf6.1582567319.git.christophe.leroy@c-s.fr --- arch/powerpc/kernel/kprobes.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 227510df8c55..6f96f65ebfe8 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -287,14 +287,18 @@ int kprobe_handler(struct pt_regs *regs) p = get_kprobe(addr); if (!p) { - if (*addr != BREAKPOINT_INSTRUCTION) { + unsigned int instr; + + if (probe_kernel_address(addr, instr)) + goto no_kprobe; + + if (instr != BREAKPOINT_INSTRUCTION) { /* * PowerPC has multiple variants of the "trap" * instruction. If the current instruction is a * trap variant, it could belong to someone else */ - kprobe_opcode_t cur_insn = *addr; - if (is_trap(cur_insn)) + if (is_trap(instr)) goto no_kprobe; /* * The breakpoint instruction was removed right -- cgit v1.2.3-59-g8ed1b From e83f01fdb9143a4f90b17fbf7d8b8b21efb2f968 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 31 Mar 2020 16:03:36 +0000 Subject: powerpc/52xx: Blacklist functions running with MMU disabled for kprobe kprobe does not handle events happening in real mode, all functions running with MMU disabled have to be blacklisted. Signed-off-by: Christophe Leroy Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1ae02b6637b87fc5aaa1d5012c3e2cb30e62b4a3.1585670437.git.christophe.leroy@c-s.fr --- arch/powerpc/platforms/52xx/lite5200_sleep.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S index 3a9969c429b3..70083649c9ea 100644 --- a/arch/powerpc/platforms/52xx/lite5200_sleep.S +++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S @@ -248,6 +248,7 @@ mmu_on: blr +_ASM_NOKPROBE_SYMBOL(lite5200_wakeup) /* ---------------------------------------------------------------------- */ @@ -391,6 +392,7 @@ restore_regs: LOAD_SPRN(TBWU, 0x5b); blr +_ASM_NOKPROBE_SYMBOL(restore_regs) -- cgit v1.2.3-59-g8ed1b From 1740f15a99d30a5e2710b2b0754e65fc5ba68d1d Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 31 Mar 2020 16:03:37 +0000 Subject: powerpc/82xx: Blacklist pq2_restart() for kprobe kprobe does not handle events happening in real mode, all functions running with MMU disabled have to be blacklisted. Signed-off-by: Christophe Leroy Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/5dca36682383577a3c2b2bca4d577e8654944461.1585670437.git.christophe.leroy@c-s.fr --- arch/powerpc/platforms/82xx/pq2.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c index 1cdd5ed9d896..3b5cb39a564c 100644 --- a/arch/powerpc/platforms/82xx/pq2.c +++ b/arch/powerpc/platforms/82xx/pq2.c @@ -10,6 +10,8 @@ * Copyright (c) 2006 MontaVista Software, Inc. 
*/ +#include + #include #include #include @@ -29,6 +31,7 @@ void __noreturn pq2_restart(char *cmd) panic("Restart failed\n"); } +NOKPROBE_SYMBOL(pq2_restart) #ifdef CONFIG_PCI static int pq2_pci_exclude_device(struct pci_controller *hose, -- cgit v1.2.3-59-g8ed1b From 7aa85127b1a170694b042cbc35a07afe3904173e Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 31 Mar 2020 16:03:38 +0000 Subject: powerpc/83xx: Blacklist mpc83xx_deep_resume() for kprobe kprobe does not handle events happening in real mode, all functions running with MMU disabled have to be blacklisted. Signed-off-by: Christophe Leroy Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/3ac4ab8dd7008b9706d9228a60645a1756fa84bf.1585670437.git.christophe.leroy@c-s.fr --- arch/powerpc/platforms/83xx/suspend-asm.S | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S index 3acd7470dc5e..bc6bd4d0ae96 100644 --- a/arch/powerpc/platforms/83xx/suspend-asm.S +++ b/arch/powerpc/platforms/83xx/suspend-asm.S @@ -548,3 +548,4 @@ mpc83xx_deep_resume: mtdec r0 rfi +_ASM_NOKPROBE_SYMBOL(mpc83xx_deep_resume) -- cgit v1.2.3-59-g8ed1b From 32a820670fa00419375a964ca8bc569e1499b90d Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 31 Mar 2020 16:03:39 +0000 Subject: powerpc/powermac: Blacklist functions running with MMU disabled for kprobe kprobe does not handle events happening in real mode, all functions running with MMU disabled have to be blacklisted. Signed-off-by: Christophe Leroy Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/6316e8883753499073f47301857e4e88b73c3ddd.1585670437.git.christophe.leroy@c-s.fr --- arch/powerpc/platforms/powermac/cache.S | 2 ++ arch/powerpc/platforms/powermac/sleep.S | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S index da69e0fcb4f1..ced225415486 100644 --- a/arch/powerpc/platforms/powermac/cache.S +++ b/arch/powerpc/platforms/powermac/cache.S @@ -184,6 +184,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) mtlr r10 blr +_ASM_NOKPROBE_SYMBOL(flush_disable_75x) /* This code is for 745x processors */ flush_disable_745x: @@ -351,4 +352,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR) mtmsr r11 /* restore DR and EE */ isync blr +_ASM_NOKPROBE_SYMBOL(flush_disable_745x) #endif /* CONFIG_PPC_BOOK3S_32 */ diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S index bd6085b470b7..f9a680fdd9c4 100644 --- a/arch/powerpc/platforms/powermac/sleep.S +++ b/arch/powerpc/platforms/powermac/sleep.S @@ -244,7 +244,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) mtmsr r2 isync b 1b - +_ASM_NOKPROBE_SYMBOL(low_cpu_die) /* * Here is the resume code. */ @@ -282,6 +282,7 @@ _GLOBAL(core99_wake_up) lwz r1,0(r3) /* Pass thru to older resume code ... */ +_ASM_NOKPROBE_SYMBOL(core99_wake_up) /* * Here is the resume code for older machines. * r1 has the physical address of SL_PC(sp). 
@@ -429,6 +430,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) lwz r0,4(r1) mtlr r0 blr +_ASM_NOKPROBE_SYMBOL(grackle_wake_up) turn_on_mmu: mflr r4 @@ -438,6 +440,7 @@ turn_on_mmu: sync isync rfi +_ASM_NOKPROBE_SYMBOL(turn_on_mmu) #endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */ -- cgit v1.2.3-59-g8ed1b From a64371b5d4fb37199dcd04cb7bf0132894018e33 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 31 Mar 2020 16:03:40 +0000 Subject: powerpc/mem: Blacklist flush_dcache_icache_phys() for kprobe kprobe does not handle events happening in real mode, all functions running with MMU disabled have to be blacklisted. Signed-off-by: Christophe Leroy Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/eaab3bff961c3bfe149f1d0bd3593291ef939dcc.1585670437.git.christophe.leroy@c-s.fr --- arch/powerpc/mm/mem.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 041ed7cfd341..f9c20cc23718 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -468,6 +469,7 @@ static void flush_dcache_icache_phys(unsigned long physaddr) : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0) : "ctr", "memory"); } +NOKPROBE_SYMBOL(flush_dcache_icache_phys) #endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64) /* -- cgit v1.2.3-59-g8ed1b From f892c21d2efb3b86ecbf8f5a95ea4abeedcc91b0 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 31 Mar 2020 16:03:41 +0000 Subject: powerpc/32s: Make local symbols non visible in hash_low. In hash_low.S, a lot of named local symbols are used instead of numbers to ease code readability. However, they don't need to be visible. In order to ease blacklisting of functions running with MMU disabled for kprobe, rename the symbols to .Lsymbols in order to hide them as if they were numbered labels. Signed-off-by: Christophe Leroy Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/90c430d9e0f7af772a58aaeaf17bcc6321265340.1585670437.git.christophe.leroy@c-s.fr --- arch/powerpc/mm/book3s32/hash_low.S | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S index 877d880890fe..7afd7ec4ca50 100644 --- a/arch/powerpc/mm/book3s32/hash_low.S +++ b/arch/powerpc/mm/book3s32/hash_low.S @@ -81,7 +81,7 @@ _GLOBAL(hash_page) rlwinm. r8,r8,0,0,20 /* extract pt base address */ #endif #ifdef CONFIG_SMP - beq- hash_page_out /* return if no mapping */ + beq- .Lhash_page_out /* return if no mapping */ #else /* XXX it seems like the 601 will give a machine fault on the rfi if its alignment is wrong (bottom 4 bits of address are @@ -109,11 +109,11 @@ _GLOBAL(hash_page) #if (PTE_FLAGS_OFFSET != 0) addi r8,r8,PTE_FLAGS_OFFSET #endif -retry: +.Lretry: lwarx r6,0,r8 /* get linux-style pte, flag word */ andc. r5,r3,r6 /* check access & ~permission */ #ifdef CONFIG_SMP - bne- hash_page_out /* return if access not permitted */ + bne- .Lhash_page_out /* return if access not permitted */ #else bnelr- #endif @@ -128,7 +128,7 @@ retry: #endif /* CONFIG_SMP */ #endif /* CONFIG_PTE_64BIT */ stwcx. 
r5,0,r8 /* attempt to update PTE */ - bne- retry /* retry if someone got there first */ + bne- .Lretry /* retry if someone got there first */ mfsrin r3,r4 /* get segment reg for segment */ #ifndef CONFIG_VMAP_STACK @@ -156,7 +156,7 @@ retry: #endif #ifdef CONFIG_SMP -hash_page_out: +.Lhash_page_out: eieio lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha li r0,0 @@ -360,7 +360,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) 1: LDPTEu r6,HPTE_SIZE(r4) /* get next PTE */ CMPPTE 0,r6,r5 bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */ - beq+ found_slot + beq+ .Lfound_slot patch_site 0f, patch__hash_page_B /* Search the secondary PTEG for a matching PTE */ @@ -372,7 +372,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) 2: LDPTEu r6,HPTE_SIZE(r4) CMPPTE 0,r6,r5 bdnzf 2,2b - beq+ found_slot + beq+ .Lfound_slot xori r5,r5,PTE_H /* clear H bit again */ /* Search the primary PTEG for an empty slot */ @@ -381,7 +381,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) 1: LDPTEu r6,HPTE_SIZE(r4) /* get next PTE */ TST_V(r6) /* test valid bit */ bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */ - beq+ found_empty + beq+ .Lfound_empty /* update counter of times that the primary PTEG is full */ lis r4, (primary_pteg_full - PAGE_OFFSET)@ha @@ -399,7 +399,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) 2: LDPTEu r6,HPTE_SIZE(r4) TST_V(r6) bdnzf 2,2b - beq+ found_empty + beq+ .Lfound_empty xori r5,r5,PTE_H /* clear H bit again */ /* @@ -437,9 +437,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) #ifndef CONFIG_SMP /* Store PTE in PTEG */ -found_empty: +.Lfound_empty: STPTE r5,0(r4) -found_slot: +.Lfound_slot: STPTE r8,HPTE_SIZE/2(r4) #else /* CONFIG_SMP */ @@ -460,8 +460,8 @@ found_slot: * We do however have to make sure that the PTE is never in an invalid * state with the V bit set. */ -found_empty: -found_slot: +.Lfound_empty: +.Lfound_slot: CLR_V(r5,r0) /* clear V (valid) bit in PTE */ STPTE r5,0(r4) sync -- cgit v1.2.3-59-g8ed1b From e6209318d63e2774c5ab214b14b948079e040064 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 31 Mar 2020 16:03:42 +0000 Subject: powerpc/32s: Blacklist functions running with MMU disabled for kprobe kprobe does not handle events happening in real mode, all functions running with MMU disabled have to be blacklisted. Signed-off-by: Christophe Leroy Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/dabed523c1b8955dd425152ce260b390053e727a.1585670437.git.christophe.leroy@c-s.fr --- arch/powerpc/mm/book3s32/hash_low.S | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S index 7afd7ec4ca50..2702e8762c0d 100644 --- a/arch/powerpc/mm/book3s32/hash_low.S +++ b/arch/powerpc/mm/book3s32/hash_low.S @@ -163,6 +163,7 @@ _GLOBAL(hash_page) stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8) blr #endif /* CONFIG_SMP */ +_ASM_NOKPROBE_SYMBOL(hash_page) /* * Add an entry for a particular page to the hash table. @@ -267,6 +268,7 @@ _GLOBAL(add_hash_page) lwz r0,4(r1) mtlr r0 blr +_ASM_NOKPROBE_SYMBOL(add_hash_page) /* * This routine adds a hardware PTE to the hash table. 
@@ -474,6 +476,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) sync /* make sure pte updates get to memory */ blr +_ASM_NOKPROBE_SYMBOL(create_hpte) .section .bss .align 2 @@ -630,6 +633,7 @@ _GLOBAL(flush_hash_pages) isync blr EXPORT_SYMBOL(flush_hash_pages) +_ASM_NOKPROBE_SYMBOL(flush_hash_pages) /* * Flush an entry from the TLB @@ -667,6 +671,7 @@ _GLOBAL(_tlbie) sync #endif /* CONFIG_SMP */ blr +_ASM_NOKPROBE_SYMBOL(_tlbie) /* * Flush the entire TLB. 603/603e only @@ -708,3 +713,4 @@ _GLOBAL(_tlbia) isync #endif /* CONFIG_SMP */ blr +_ASM_NOKPROBE_SYMBOL(_tlbia) -- cgit v1.2.3-59-g8ed1b From 32746dfe4cf37f4077929601e8877a7fd02676e8 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 31 Mar 2020 16:03:43 +0000 Subject: powerpc/rtas: Remove machine_check_in_rtas() machine_check_in_rtas() is just a trap. Do the trap directly in the machine check exception handler. Signed-off-by: Christophe Leroy Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/78899f40f89cb3c4f69bdff7f04eb6ec7cb753d5.1585670437.git.christophe.leroy@c-s.fr --- arch/powerpc/kernel/entry_32.S | 6 ------ arch/powerpc/kernel/head_32.S | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index a7b261440d59..126ede4d591f 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -1380,10 +1380,4 @@ _GLOBAL(enter_rtas) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 RFI /* return to caller */ - - .globl machine_check_in_rtas -machine_check_in_rtas: - twi 31,0,0 - /* XXX load up BATs and panic */ - #endif /* CONFIG_PPC_RTAS */ diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 97c887950c3c..e2459550a3bf 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -297,7 +297,7 @@ MachineCheck: cmpwi cr1, r4, 0 #endif beq cr1, machine_check_tramp - b machine_check_in_rtas + twi 31, 0, 0 #else b machine_check_tramp #endif -- cgit v1.2.3-59-g8ed1b From 5f32e8361cba8c58c4f272a389296f489ecc2823 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 31 Mar 2020 16:03:44 +0000 Subject: powerpc/32: Blacklist functions running with MMU disabled for kprobe kprobe does not handle events happening in real mode, all functions running with MMU disabled have to be blacklisted. Signed-off-by: Christophe Leroy Acked-by: Naveen N. 
Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/3bf57066d05518644dee0840af69d36ab5086729.1585670437.git.christophe.leroy@c-s.fr --- arch/powerpc/kernel/cpu_setup_6xx.S | 2 ++ arch/powerpc/kernel/entry_32.S | 3 +++ arch/powerpc/kernel/fpu.S | 1 + arch/powerpc/kernel/idle_6xx.S | 1 + arch/powerpc/kernel/idle_e500.S | 1 + arch/powerpc/kernel/l2cr_6xx.S | 1 + arch/powerpc/kernel/misc.S | 2 ++ arch/powerpc/kernel/misc_32.S | 2 ++ arch/powerpc/kernel/swsusp_32.S | 2 ++ arch/powerpc/kernel/vector.S | 1 + 10 files changed, 16 insertions(+) diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index f6517f67265a..f8b5ff64b604 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S @@ -288,6 +288,7 @@ _GLOBAL(__init_fpu_registers) mtmsr r10 isync blr +_ASM_NOKPROBE_SYMBOL(__init_fpu_registers) /* Definitions for the table use to save CPU states */ @@ -483,4 +484,5 @@ _GLOBAL(__restore_cpu_setup) 1: mtcr r7 blr +_ASM_NOKPROBE_SYMBOL(__restore_cpu_setup) diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 126ede4d591f..d54f40650ff8 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -312,6 +312,7 @@ stack_ovf: mtspr SPRN_SRR1,r10 SYNC RFI +_ASM_NOKPROBE_SYMBOL(stack_ovf) #endif #ifdef CONFIG_TRACE_IRQFLAGS @@ -1326,6 +1327,7 @@ nonrecoverable: bl unrecoverable_exception /* shouldn't return */ b 4b +_ASM_NOKPROBE_SYMBOL(nonrecoverable) .section .bss .align 2 @@ -1380,4 +1382,5 @@ _GLOBAL(enter_rtas) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 RFI /* return to caller */ +_ASM_NOKPROBE_SYMBOL(enter_rtas) #endif /* CONFIG_PPC_RTAS */ diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index 3235a8da6af7..1dfccf58fbb1 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -119,6 +119,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) /* restore registers and return */ /* we haven't used ctr or xer or lr */ blr +_ASM_NOKPROBE_SYMBOL(load_up_fpu) /* * save_fpu(tsk) diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S index 433d97bea1f3..69df840f7253 100644 --- a/arch/powerpc/kernel/idle_6xx.S +++ b/arch/powerpc/kernel/idle_6xx.S @@ -187,6 +187,7 @@ BEGIN_FTR_SECTION mtspr SPRN_HID1, r9 END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) b transfer_to_handler_cont +_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore) .data diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S index 308f499e146c..72c85b6f3898 100644 --- a/arch/powerpc/kernel/idle_e500.S +++ b/arch/powerpc/kernel/idle_e500.S @@ -90,3 +90,4 @@ _GLOBAL(power_save_ppc32_restore) #endif b transfer_to_handler_cont +_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore) diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S index 2020d255585f..5f07aa5e9851 100644 --- a/arch/powerpc/kernel/l2cr_6xx.S +++ b/arch/powerpc/kernel/l2cr_6xx.S @@ -455,5 +455,6 @@ _GLOBAL(__inval_enable_L1) sync blr +_ASM_NOKPROBE_SYMBOL(__inval_enable_L1) diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S index 65f9f731c229..5be96feccb55 100644 --- a/arch/powerpc/kernel/misc.S +++ b/arch/powerpc/kernel/misc.S @@ -36,6 +36,8 @@ _GLOBAL(add_reloc_offset) add r3,r3,r5 mtlr r0 blr +_ASM_NOKPROBE_SYMBOL(reloc_offset) +_ASM_NOKPROBE_SYMBOL(add_reloc_offset) .align 3 2: PPC_LONG 1b diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 70bb885b14c6..b24f866fef81 100644 --- a/arch/powerpc/kernel/misc_32.S +++ 
b/arch/powerpc/kernel/misc_32.S @@ -246,6 +246,7 @@ _GLOBAL(real_readb) sync isync blr +_ASM_NOKPROBE_SYMBOL(real_readb) /* * Do an IO access in real mode @@ -263,6 +264,7 @@ _GLOBAL(real_writeb) sync isync blr +_ASM_NOKPROBE_SYMBOL(real_writeb) #endif /* CONFIG_40x */ diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S index cbdf86228eaa..f73f4d72fea4 100644 --- a/arch/powerpc/kernel/swsusp_32.S +++ b/arch/powerpc/kernel/swsusp_32.S @@ -395,6 +395,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) li r3,0 blr +_ASM_NOKPROBE_SYMBOL(swsusp_arch_resume) /* FIXME:This construct is actually not useful since we don't shut * down the instruction MMU, we could just flip back MSR-DR on. @@ -406,4 +407,5 @@ turn_on_mmu: sync isync rfi +_ASM_NOKPROBE_SYMBOL(turn_on_mmu) diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index d20c5e79e03c..efc5b52f95d2 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S @@ -89,6 +89,7 @@ _GLOBAL(load_up_altivec) REST_32VRS(0,r4,r6) /* restore registers and return */ blr +_ASM_NOKPROBE_SYMBOL(load_up_altivec) /* * save_altivec(tsk) -- cgit v1.2.3-59-g8ed1b From a616c442119f2ea5641e6abc215d7255b73b982b Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 31 Mar 2020 16:03:45 +0000 Subject: powerpc/entry32: Blacklist exception entry points for kprobe. kprobe does not handle events happening in real mode. As exception entry points are running with MMU disabled, blacklist them. The handling of TLF_NAPPING and TLF_SLEEPING is moved before the CONFIG_TRACE_IRQFLAGS which contains 'reenable_mmu' because from there kprobe will be possible as the kernel will run with MMU enabled. Signed-off-by: Christophe Leroy Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/f61ac599855e674ebb592464d0ea32a3ba9c6644.1585670437.git.christophe.leroy@c-s.fr --- arch/powerpc/kernel/entry_32.S | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index d54f40650ff8..c9fc6ea36bc6 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -50,6 +50,7 @@ mcheck_transfer_to_handler: mfspr r0,SPRN_DSRR1 stw r0,_DSRR1(r11) /* fall through */ +_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler) .globl debug_transfer_to_handler debug_transfer_to_handler: @@ -58,6 +59,7 @@ debug_transfer_to_handler: mfspr r0,SPRN_CSRR1 stw r0,_CSRR1(r11) /* fall through */ +_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler) .globl crit_transfer_to_handler crit_transfer_to_handler: @@ -93,6 +95,7 @@ crit_transfer_to_handler: rlwinm r0,r1,0,0,(31 - THREAD_SHIFT) stw r0,KSP_LIMIT(r8) /* fall through */ +_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler) #endif #ifdef CONFIG_40x @@ -114,6 +117,7 @@ crit_transfer_to_handler: rlwinm r0,r1,0,0,(31 - THREAD_SHIFT) stw r0,KSP_LIMIT(r8) /* fall through */ +_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler) #endif /* @@ -126,6 +130,7 @@ crit_transfer_to_handler: .globl transfer_to_handler_full transfer_to_handler_full: SAVE_NVGPRS(r11) +_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full) /* fall through */ .globl transfer_to_handler @@ -226,6 +231,23 @@ transfer_to_handler_cont: SYNC RFI /* jump to handler, enable MMU */ +#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) +4: rlwinm r12,r12,0,~_TLF_NAPPING + stw r12,TI_LOCAL_FLAGS(r2) + b power_save_ppc32_restore + +7: rlwinm r12,r12,0,~_TLF_SLEEPING + stw r12,TI_LOCAL_FLAGS(r2) + lwz 
r9,_MSR(r11) /* if sleeping, clear MSR.EE */ + rlwinm r9,r9,0,~MSR_EE + lwz r12,_LINK(r11) /* and return to address in LR */ + kuap_restore r11, r2, r3, r4, r5 + lwz r2, GPR2(r11) + b fast_exception_return +#endif +_ASM_NOKPROBE_SYMBOL(transfer_to_handler) +_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont) + #ifdef CONFIG_TRACE_IRQFLAGS 1: /* MSR is changing, re-enable MMU so we can notify lockdep. We need to * keep interrupts disabled at this point otherwise we might risk @@ -271,21 +293,6 @@ reenable_mmu: bctr /* jump to handler */ #endif /* CONFIG_TRACE_IRQFLAGS */ -#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) -4: rlwinm r12,r12,0,~_TLF_NAPPING - stw r12,TI_LOCAL_FLAGS(r2) - b power_save_ppc32_restore - -7: rlwinm r12,r12,0,~_TLF_SLEEPING - stw r12,TI_LOCAL_FLAGS(r2) - lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */ - rlwinm r9,r9,0,~MSR_EE - lwz r12,_LINK(r11) /* and return to address in LR */ - kuap_restore r11, r2, r3, r4, r5 - lwz r2, GPR2(r11) - b fast_exception_return -#endif - #ifndef CONFIG_VMAP_STACK /* * On kernel stack overflow, load up an initial stack pointer -- cgit v1.2.3-59-g8ed1b From 7cdf4401388572f720403a7038a178a4b30ac14c Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 31 Mar 2020 16:03:46 +0000 Subject: powerpc/entry32: Blacklist syscall exit points for kprobe. kprobe does not handle events happening in real mode. The very last part of syscall cannot support a trap. Add a symbol syscall_exit_finish to identify that part and blacklist it from kprobe. Signed-off-by: Christophe Leroy Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/23eddf49abb03d1359fa0be4206998eb3800f42c.1585670437.git.christophe.leroy@c-s.fr --- arch/powerpc/kernel/entry_32.S | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index c9fc6ea36bc6..5fa192b33509 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -462,6 +462,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) lwz r7,_NIP(r1) lwz r2,GPR2(r1) lwz r1,GPR1(r1) +syscall_exit_finish: #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) mtspr SPRN_NRI, r0 #endif @@ -469,6 +470,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) mtspr SPRN_SRR1,r8 SYNC RFI +_ASM_NOKPROBE_SYMBOL(syscall_exit_finish) #ifdef CONFIG_44x 2: li r7,0 iccci r0,r0 @@ -600,6 +602,7 @@ ret_from_kernel_syscall: mtspr SPRN_SRR1, r10 SYNC RFI +_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall) /* * The fork/clone functions need to copy the full register set into -- cgit v1.2.3-59-g8ed1b From e51c3e13709fe55d4d0eb50ba435bc53a64152bf Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 31 Mar 2020 16:03:47 +0000 Subject: powerpc/entry32: Blacklist exception exit points for kprobe. kprobe does not handle events happening in real mode. The very last part of exception exits cannot support a trap. Blacklist them from kprobe. While we are at it, remove exc_exit_start symbol which is not used to avoid having to blacklist it. Signed-off-by: Christophe Leroy Acked-by: Naveen N. 
Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/098b0fd3f6299aa1bd692bd576bd7012c84608de.1585670437.git.christophe.leroy@c-s.fr --- arch/powerpc/kernel/entry_32.S | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 5fa192b33509..217ebdf5b00b 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -806,6 +806,7 @@ fast_exception_return: lwz r11,GPR11(r11) SYNC RFI +_ASM_NOKPROBE_SYMBOL(fast_exception_return) #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) /* check if the exception happened in a restartable section */ @@ -1041,6 +1042,8 @@ exc_exit_restart: exc_exit_restart_end: SYNC RFI +_ASM_NOKPROBE_SYMBOL(exc_exit_restart) +_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end) #else /* !(CONFIG_4xx || CONFIG_BOOKE) */ /* @@ -1062,7 +1065,6 @@ exc_exit_restart_end: exc_exit_restart: lwz r11,_NIP(r1) lwz r12,_MSR(r1) -exc_exit_start: mtspr SPRN_SRR0,r11 mtspr SPRN_SRR1,r12 REST_2GPRS(11, r1) @@ -1071,6 +1073,7 @@ exc_exit_start: exc_exit_restart_end: rfi b . /* prevent prefetch past rfi */ +_ASM_NOKPROBE_SYMBOL(exc_exit_restart) /* * Returning from a critical interrupt in user mode doesn't need @@ -1182,6 +1185,7 @@ ret_from_crit_exc: mtspr SPRN_SRR0,r9; mtspr SPRN_SRR1,r10; RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI) +_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc) #endif /* CONFIG_40x */ #ifdef CONFIG_BOOKE @@ -1193,6 +1197,7 @@ ret_from_crit_exc: RESTORE_xSRR(SRR0,SRR1); RESTORE_MMU_REGS; RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI) +_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc) .globl ret_from_debug_exc ret_from_debug_exc: @@ -1203,6 +1208,7 @@ ret_from_debug_exc: RESTORE_xSRR(CSRR0,CSRR1); RESTORE_MMU_REGS; RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI) +_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc) .globl ret_from_mcheck_exc ret_from_mcheck_exc: @@ -1214,6 +1220,7 @@ ret_from_mcheck_exc: RESTORE_xSRR(DSRR0,DSRR1); RESTORE_MMU_REGS; RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI) +_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc) #endif /* CONFIG_BOOKE */ /* -- cgit v1.2.3-59-g8ed1b From 332ce969b763553e9c4d55069e1e15aba4ea560f Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 15 Apr 2020 10:06:09 +0000 Subject: powerpc/8xx: Reduce time spent in allow_user_access() and friends To enable/disable kernel access to user space, the 8xx has to modify the properties of access group 1. This is done by writing predefined values into SPRN_Mx_AP registers. As of today, a __put_user() gives: 00000d64 : d64: 3d 20 4f ff lis r9,20479 d68: 61 29 ff ff ori r9,r9,65535 d6c: 7d 3a c3 a6 mtspr 794,r9 d70: 39 20 00 00 li r9,0 d74: 90 83 00 00 stw r4,0(r3) d78: 3d 20 6f ff lis r9,28671 d7c: 61 29 ff ff ori r9,r9,65535 d80: 7d 3a c3 a6 mtspr 794,r9 d84: 4e 80 00 20 blr Because only groups 0 and 1 are used, the definition of groups 2 to 15 doesn't matter. 
By setting unused bits to 0 instead of 1, one instruction is removed for each lock and unlock action: 00000d5c : d5c: 3d 20 40 00 lis r9,16384 d60: 7d 3a c3 a6 mtspr 794,r9 d64: 39 20 00 00 li r9,0 d68: 90 83 00 00 stw r4,0(r3) d6c: 3d 20 60 00 lis r9,24576 d70: 7d 3a c3 a6 mtspr 794,r9 d74: 4e 80 00 20 blr Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/57425c33dd72f292b1a23570244b81419072a7aa.1586945153.git.christophe.leroy@c-s.fr --- arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index e82368838416..1d9ac0f9c794 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -36,16 +36,16 @@ * Therefore, we define 2 APG groups. lsb is _PMD_USER * 0 => Kernel => 01 (all accesses performed according to page definition) * 1 => User => 00 (all accesses performed as supervisor iaw page definition) - * 2-16 => NA => 11 (all accesses performed as user iaw page definition) + * 2-15 => Not Used */ -#define MI_APG_INIT 0x4fffffff +#define MI_APG_INIT 0x40000000 /* * 0 => Kernel => 01 (all accesses performed according to page definition) * 1 => User => 10 (all accesses performed according to swaped page definition) - * 2-16 => NA => 11 (all accesses performed as user iaw page definition) + * 2-15 => Not Used */ -#define MI_APG_KUEP 0x6fffffff +#define MI_APG_KUEP 0x60000000 /* The effective page number register. When read, contains the information * about the last instruction TLB miss. When MI_RPN is written, bits in @@ -115,16 +115,16 @@ * Therefore, we define 2 APG groups. lsb is _PMD_USER * 0 => Kernel => 01 (all accesses performed according to page definition) * 1 => User => 00 (all accesses performed as supervisor iaw page definition) - * 2-16 => NA => 11 (all accesses performed as user iaw page definition) + * 2-15 => Not Used */ -#define MD_APG_INIT 0x4fffffff +#define MD_APG_INIT 0x40000000 /* * 0 => No user => 01 (all accesses performed according to page definition) * 1 => User => 10 (all accesses performed according to swaped page definition) - * 2-16 => NA => 11 (all accesses performed as user iaw page definition) + * 2-15 => Not Used */ -#define MD_APG_KUAP 0x6fffffff +#define MD_APG_KUAP 0x60000000 /* The effective page number register. When read, contains the information * about the last instruction TLB miss. When MD_RPN is written, bits in -- cgit v1.2.3-59-g8ed1b From 547e687b2981a115814962506068873d24983af7 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 15 Apr 2020 14:57:09 +0000 Subject: powerpc/uaccess: Don't set KUAP by default on book3s/32 On book3s/32, KUAP is a heavy process, as it requires determining which segments are impacted and unlocking/locking each of them. And since the implementation of user_access_begin/end, it is currently even worse, because unlike __get_user(), user_access_begin doesn't distinguish between read and write and unlocks access even for reads, although that's unneeded on book3s/32. As shown by the size of a kernel built with KUAP and one without, the overhead is 64k bytes of code. As a comparison, a similar build on an 8xx has an overhead of only 8k bytes of code.
   text    data    bss     dec    hex filename
7230416 1425868 837376 9493660 90dc9c vmlinux.kuap6xx
7165012 1425548 837376 9427936 8fdbe0 vmlinux.nokuap6xx
6519796 1960028 477464 8957288 88ad68 vmlinux.kuap8xx
6511664 1959864 477464 8948992 888d00 vmlinux.nokuap8xx

Until a more optimised KUAP is implemented on book3s/32, don't select it by default. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/154a99399317b096ac1f04827b9f8d7a9179ddc1.1586962586.git.christophe.leroy@c-s.fr --- arch/powerpc/platforms/Kconfig.cputype | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index e3fb0ef5129f..9a7baf31d276 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -389,7 +389,7 @@ config PPC_HAVE_KUAP config PPC_KUAP bool "Kernel Userspace Access Protection" depends on PPC_HAVE_KUAP - default y + default y if !PPC_BOOK3S_32 help Enable support for Kernel Userspace Access Protection (KUAP) -- cgit v1.2.3-59-g8ed1b From c3ba4dbbd1d05b49ec01efe098e0a78857d3ce22 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 15 Apr 2020 14:57:11 +0000 Subject: powerpc/uaccess: Don't set KUEP by default on book3s/32 On book3s/32, KUEP is a heavy process, as it requires setting or clearing the NX bit in each of the 12 user segments every time the kernel is entered or exited from/to user space. Don't select KUEP by default on book3s/32. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1492bb150c1aaa53d99a604b49992e60ea20cd5f.1586962582.git.christophe.leroy@c-s.fr --- arch/powerpc/platforms/Kconfig.cputype | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 9a7baf31d276..d349603fb889 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -377,7 +377,7 @@ config PPC_HAVE_KUEP config PPC_KUEP bool "Kernel Userspace Execution Prevention" depends on PPC_HAVE_KUEP - default y + default y if !PPC_BOOK3S_32 help Enable support for Kernel Userspace Execution Prevention (KUEP) -- cgit v1.2.3-59-g8ed1b From 888468ce725a4cd56d72dc7e5096078f7a9251a0 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 28 May 2020 10:17:04 +0000 Subject: powerpc/32: Disable KASAN with pages bigger than 16k Mapping of the early shadow area is implemented by using a single static page table having all entries pointing to the same early shadow page. The shadow area must therefore occupy full PGD entries. The shadow area has a size of 128MB, starting at 0xf8000000. With 4k pages, a PGD entry is 4MB. With 16k pages, a PGD entry is 64MB. With 64k pages, a PGD entry is 1GB, which is too big. Until we rework the early shadow mapping, disable KASAN when the page size is too big.
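For reference, the PGD entry sizes quoted above follow from a back-of-the-envelope computation (ours, not the patch's): with 4-byte PTEs, one page of PTEs maps (PAGE_SIZE / 4) * PAGE_SIZE, i.e. PAGE_SIZE squared over 4, per PGD entry.

/* PAGE_SIZE^2 / 4, versus the 128MB shadow area:
 *   4K  pages: 1024  PTEs * 4K  =  4M -> shadow covers 32 full entries
 *   16K pages: 4096  PTEs * 16K = 64M -> shadow covers 2 full entries
 *   64K pages: 16384 PTEs * 64K =  1G -> shadow smaller than one entry
 * hence the PPC_PAGE_SHIFT <= 14 (16k) cutoff in the hunk below.
 */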
Fixes: 2edb16efc899 ("powerpc/32: Add KASAN support") Cc: stable@vger.kernel.org # v5.2+ Reported-by: kbuild test robot Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/7195fcde7314ccbf7a081b356084a69d421b10d4.1590660977.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1e69cc299547..2d5367afbb5e 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -169,8 +169,8 @@ config PPC select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU select HAVE_ARCH_JUMP_LABEL - select HAVE_ARCH_KASAN if PPC32 - select HAVE_ARCH_KASAN_VMALLOC if PPC32 + select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14 + select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14 select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT -- cgit v1.2.3-59-g8ed1b From 03b51416e876aea5e7638947e50831b6c988c246 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Tue, 21 Apr 2020 23:05:43 +0530 Subject: powerpc/module_64: Consolidate ftrace code module_trampoline_target() is only used by ftrace. Move the prototype within the appropriate #ifdef in the header. Also, move the function body to the end of module_64.c so as to consolidate all ftrace code in one place. No functional changes. Signed-off-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/2527351f65c53c5866068ae130dc34c5d4ee8ad9.1587488954.git.naveen.n.rao@linux.vnet.ibm.com --- arch/powerpc/include/asm/module.h | 3 -- arch/powerpc/kernel/module_64.c | 69 +++++++++++++++++++-------------------- 2 files changed, 33 insertions(+), 39 deletions(-) diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index 356658711a86..6b99d773f522 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -90,12 +90,9 @@ struct mod_arch_specific { # ifdef MODULE asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous"); # endif /* MODULE */ -#endif int module_trampoline_target(struct module *mod, unsigned long trampoline, unsigned long *target); - -#ifdef CONFIG_DYNAMIC_FTRACE int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs); #else static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs) diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index f390451ad915..0acec12d0985 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -145,42 +145,6 @@ static u32 ppc64_stub_insns[] = { PPC_INST_BCTR, }; -#ifdef CONFIG_DYNAMIC_FTRACE -int module_trampoline_target(struct module *mod, unsigned long addr, - unsigned long *target) -{ - struct ppc64_stub_entry *stub; - func_desc_t funcdata; - u32 magic; - - if (!within_module_core(addr, mod)) { - pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name); - return -EFAULT; - } - - stub = (struct ppc64_stub_entry *)addr; - - if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) { - pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name); - return -EFAULT; - } - - if (magic != STUB_MAGIC) { - pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name); - return -EFAULT; - } - - if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) { - pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, 
mod->name);
-		return -EFAULT;
-	}
-
-	*target = stub_func_addr(funcdata);
-
-	return 0;
-}
-#endif
-
 /* Count how many different 24-bit relocations (different symbol,
    different addend) */
 static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
@@ -731,6 +695,39 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
+int module_trampoline_target(struct module *mod, unsigned long addr,
+			     unsigned long *target)
+{
+	struct ppc64_stub_entry *stub;
+	func_desc_t funcdata;
+	u32 magic;
+
+	if (!within_module_core(addr, mod)) {
+		pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
+		return -EFAULT;
+	}
+
+	stub = (struct ppc64_stub_entry *)addr;
+
+	if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) {
+		pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
+		return -EFAULT;
+	}
+
+	if (magic != STUB_MAGIC) {
+		pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
+		return -EFAULT;
+	}
+
+	if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) {
+		pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
+		return -EFAULT;
+	}
+
+	*target = stub_func_addr(funcdata);
+
+	return 0;
+}
 
 #ifdef CONFIG_MPROFILE_KERNEL
-- cgit v1.2.3-59-g8ed1b

From 1f2aaed2db03150428dbcd2ddee02ae6cb4bac52 Mon Sep 17 00:00:00 2001
From: "Naveen N. Rao"
Date: Tue, 21 Apr 2020 23:05:44 +0530
Subject: powerpc/module_64: Simplify check for -mprofile-kernel ftrace relocations

For -mprofile-kernel, we need special handling when generating stubs for ftrace calls such as _mcount(). To facilitate this, we check if an R_PPC64_REL24 relocation is for a symbol named "_mcount()", along with checking the instruction sequence. The latter is not really required, since "_mcount()" is an exported symbol and kernel modules cannot use it. As such, drop the additional checking and simplify the code. This helps unify stub creation for ftrace stubs with -mprofile-kernel and aids in code reuse.

Also rename is_mprofile_mcount_callsite() to is_mprofile_ftrace_call() to reflect the checking being done.

Signed-off-by: Naveen N. Rao
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/7d9c316adfa1fb787ad268bb4691e7e4059ff2d5.1587488954.git.naveen.n.rao@linux.vnet.ibm.com
---
 arch/powerpc/kernel/module_64.c | 18 ++++--------------
 1 file changed, 4 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 0acec12d0985..de317c158f38 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -400,19 +400,9 @@ static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
 }
 
 #ifdef CONFIG_MPROFILE_KERNEL
-static bool is_mprofile_mcount_callsite(const char *name, u32 *instruction)
+static bool is_mprofile_ftrace_call(const char *name)
 {
-	if (strcmp("_mcount", name))
-		return false;
-
-	/*
-	 * Check if this is one of the -mprofile-kernel sequences.
- */ - if (instruction[-1] == PPC_INST_STD_LR && - instruction[-2] == PPC_INST_MFLR) - return true; - - if (instruction[-1] == PPC_INST_MFLR) + if (!strcmp("_mcount", name)) return true; return false; @@ -436,7 +426,7 @@ static void squash_toc_save_inst(const char *name, unsigned long addr) #else static void squash_toc_save_inst(const char *name, unsigned long addr) { } -static bool is_mprofile_mcount_callsite(const char *name, u32 *instruction) +static bool is_mprofile_ftrace_call(const char *name) { return false; } @@ -448,7 +438,7 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me) { u32 *prev_insn = instruction - 1; - if (is_mprofile_mcount_callsite(name, prev_insn)) + if (is_mprofile_ftrace_call(name)) return 1; /* -- cgit v1.2.3-59-g8ed1b From bd55e792de0844631d34487d43eaf3f13294ebfe Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Tue, 21 Apr 2020 23:05:45 +0530 Subject: powerpc/module_64: Use special stub for _mcount() with -mprofile-kernel Since commit c55d7b5e64265f ("powerpc: Remove STRICT_KERNEL_RWX incompatibility with RELOCATABLE"), powerpc kernels with -mprofile-kernel can crash in certain scenarios with a trace like below: BUG: Unable to handle kernel instruction fetch (NULL pointer?) Faulting instruction address: 0x00000000 Oops: Kernel access of bad area, sig: 11 [#1] LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=256 DEBUG_PAGEALLOC NUMA PowerNV NIP [0000000000000000] 0x0 LR [c0080000102c0048] ext4_iomap_end+0x8/0x30 [ext4] Call Trace: iomap_apply+0x20c/0x920 (unreliable) iomap_bmap+0xfc/0x160 ext4_bmap+0xa4/0x180 [ext4] bmap+0x4c/0x80 jbd2_journal_init_inode+0x44/0x1a0 [jbd2] ext4_load_journal+0x440/0x860 [ext4] ext4_fill_super+0x342c/0x3ab0 [ext4] mount_bdev+0x25c/0x290 ext4_mount+0x28/0x50 [ext4] legacy_get_tree+0x4c/0xb0 vfs_get_tree+0x4c/0x130 do_mount+0xa18/0xc50 sys_mount+0x158/0x180 system_call+0x5c/0x68 The NIP points to NULL, or a random location (data even), while the LR always points to the LEP of a function (with an offset of 8), indicating that something went wrong with ftrace. However, ftrace is not necessarily active when such crashes occur. The kernel OOPS sometimes follows a warning from ftrace indicating that some module functions could not be patched with a nop. Other times, if a module is loaded early during boot, instruction patching can fail due to a separate bug, but the error is not reported due to missing error reporting. In all the above cases when instruction patching fails, ftrace will be disabled but certain kernel module functions will be left with default calls to _mcount(). This is not a problem with ELFv1. However, with -mprofile-kernel, the default stub is problematic since it depends on a valid module TOC in r2. If the kernel (or a different module) calls into a function that does not use the TOC, the function won't have a prologue to setup the module TOC. When that function calls into _mcount(), we will end up in the relocation stub that will use the previous TOC, and end up trying to jump into a random location. From the above trace: iomap_apply+0x20c/0x920 [kernel TOC] | V ext4_iomap_end+0x8/0x30 [no GEP == kernel TOC] | V _mcount() stub [uses kernel TOC -> random entry] To address this, let's change over to using the special stub that is used for ftrace_[regs_]caller() for _mcount(). This ensures that we are not dependent on a valid module TOC in r2 for default _mcount() handling. Reported-by: Qian Cai Signed-off-by: Naveen N. 
Rao Tested-by: Qian Cai Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/8affd4298d22099bbd82544fab8185700a6222b1.1587488954.git.naveen.n.rao@linux.vnet.ibm.com --- arch/powerpc/kernel/module_64.c | 222 +++++++++++++++++++--------------------- 1 file changed, 104 insertions(+), 118 deletions(-) diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index de317c158f38..f4c2fa190192 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -334,6 +334,92 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr, return 0; } +#ifdef CONFIG_MPROFILE_KERNEL + +#define PACATOC offsetof(struct paca_struct, kernel_toc) + +/* + * ld r12,PACATOC(r13) + * addis r12,r12, + * addi r12,r12, + * mtctr r12 + * bctr + */ +static u32 stub_insns[] = { + PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R13) | PACATOC, + PPC_INST_ADDIS | __PPC_RT(R12) | __PPC_RA(R12), + PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12), + PPC_INST_MTCTR | __PPC_RS(R12), + PPC_INST_BCTR, +}; + +/* + * For mprofile-kernel we use a special stub for ftrace_caller() because we + * can't rely on r2 containing this module's TOC when we enter the stub. + * + * That can happen if the function calling us didn't need to use the toc. In + * that case it won't have setup r2, and the r2 value will be either the + * kernel's toc, or possibly another modules toc. + * + * To deal with that this stub uses the kernel toc, which is always accessible + * via the paca (in r13). The target (ftrace_caller()) is responsible for + * saving and restoring the toc before returning. + */ +static inline int create_ftrace_stub(struct ppc64_stub_entry *entry, + unsigned long addr, + struct module *me) +{ + long reladdr; + + memcpy(entry->jump, stub_insns, sizeof(stub_insns)); + + /* Stub uses address relative to kernel toc (from the paca) */ + reladdr = addr - kernel_toc_addr(); + if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { + pr_err("%s: Address of %ps out of range of kernel_toc.\n", + me->name, (void *)addr); + return 0; + } + + entry->jump[1] |= PPC_HA(reladdr); + entry->jump[2] |= PPC_LO(reladdr); + + /* Eventhough we don't use funcdata in the stub, it's needed elsewhere. */ + entry->funcdata = func_desc(addr); + entry->magic = STUB_MAGIC; + + return 1; +} + +static bool is_mprofile_ftrace_call(const char *name) +{ + if (!strcmp("_mcount", name)) + return true; +#ifdef CONFIG_DYNAMIC_FTRACE + if (!strcmp("ftrace_caller", name)) + return true; +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + if (!strcmp("ftrace_regs_caller", name)) + return true; +#endif +#endif + + return false; +} +#else +static inline int create_ftrace_stub(struct ppc64_stub_entry *entry, + unsigned long addr, + struct module *me) +{ + return 0; +} + +static bool is_mprofile_ftrace_call(const char *name) +{ + return false; +} +#endif + /* * r2 is the TOC pointer: it actually points 0x8000 into the TOC (this gives the * value maximum span in an instruction which uses a signed offset). Round down @@ -349,10 +435,14 @@ static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me) static inline int create_stub(const Elf64_Shdr *sechdrs, struct ppc64_stub_entry *entry, unsigned long addr, - struct module *me) + struct module *me, + const char *name) { long reladdr; + if (is_mprofile_ftrace_call(name)) + return create_ftrace_stub(entry, addr, me); + memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns)); /* Stub uses address relative to r2. 
*/ @@ -376,7 +466,8 @@ static inline int create_stub(const Elf64_Shdr *sechdrs, stub to set up the TOC ptr (r2) for the function. */ static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs, unsigned long addr, - struct module *me) + struct module *me, + const char *name) { struct ppc64_stub_entry *stubs; unsigned int i, num_stubs; @@ -393,45 +484,12 @@ static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs, return (unsigned long)&stubs[i]; } - if (!create_stub(sechdrs, &stubs[i], addr, me)) + if (!create_stub(sechdrs, &stubs[i], addr, me, name)) return 0; return (unsigned long)&stubs[i]; } -#ifdef CONFIG_MPROFILE_KERNEL -static bool is_mprofile_ftrace_call(const char *name) -{ - if (!strcmp("_mcount", name)) - return true; - - return false; -} - -/* - * In case of _mcount calls, do not save the current callee's TOC (in r2) into - * the original caller's stack frame. If we did we would clobber the saved TOC - * value of the original caller. - */ -static void squash_toc_save_inst(const char *name, unsigned long addr) -{ - struct ppc64_stub_entry *stub = (struct ppc64_stub_entry *)addr; - - /* Only for calls to _mcount */ - if (strcmp("_mcount", name) != 0) - return; - - stub->jump[2] = PPC_INST_NOP; -} -#else -static void squash_toc_save_inst(const char *name, unsigned long addr) { } - -static bool is_mprofile_ftrace_call(const char *name) -{ - return false; -} -#endif - /* We expect a noop next: if it is, replace it with instruction to restore r2. */ static int restore_r2(const char *name, u32 *instruction, struct module *me) @@ -576,14 +634,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, if (sym->st_shndx == SHN_UNDEF || sym->st_shndx == SHN_LIVEPATCH) { /* External: go via stub */ - value = stub_for_addr(sechdrs, value, me); + value = stub_for_addr(sechdrs, value, me, + strtab + sym->st_name); if (!value) return -ENOENT; if (!restore_r2(strtab + sym->st_name, (u32 *)location + 1, me)) return -ENOEXEC; - - squash_toc_save_inst(strtab + sym->st_name, value); } else value += local_entry_offset(sym); @@ -719,88 +776,17 @@ int module_trampoline_target(struct module *mod, unsigned long addr, return 0; } -#ifdef CONFIG_MPROFILE_KERNEL - -#define PACATOC offsetof(struct paca_struct, kernel_toc) - -/* - * For mprofile-kernel we use a special stub for ftrace_caller() because we - * can't rely on r2 containing this module's TOC when we enter the stub. - * - * That can happen if the function calling us didn't need to use the toc. In - * that case it won't have setup r2, and the r2 value will be either the - * kernel's toc, or possibly another modules toc. - * - * To deal with that this stub uses the kernel toc, which is always accessible - * via the paca (in r13). The target (ftrace_caller()) is responsible for - * saving and restoring the toc before returning. 
- */ -static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, - struct module *me, unsigned long addr) -{ - struct ppc64_stub_entry *entry; - unsigned int i, num_stubs; - /* - * ld r12,PACATOC(r13) - * addis r12,r12, - * addi r12,r12, - * mtctr r12 - * bctr - */ - static u32 stub_insns[] = { - PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R13) | PACATOC, - PPC_INST_ADDIS | __PPC_RT(R12) | __PPC_RA(R12), - PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12), - PPC_INST_MTCTR | __PPC_RS(R12), - PPC_INST_BCTR, - }; - long reladdr; - - num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*entry); - - /* Find the next available stub entry */ - entry = (void *)sechdrs[me->arch.stubs_section].sh_addr; - for (i = 0; i < num_stubs && stub_func_addr(entry->funcdata); i++, entry++); - - if (i >= num_stubs) { - pr_err("%s: Unable to find a free slot for ftrace stub.\n", me->name); - return 0; - } - - memcpy(entry->jump, stub_insns, sizeof(stub_insns)); - - /* Stub uses address relative to kernel toc (from the paca) */ - reladdr = addr - kernel_toc_addr(); - if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { - pr_err("%s: Address of %ps out of range of kernel_toc.\n", - me->name, (void *)addr); - return 0; - } - - entry->jump[1] |= PPC_HA(reladdr); - entry->jump[2] |= PPC_LO(reladdr); - - /* Eventhough we don't use funcdata in the stub, it's needed elsewhere. */ - entry->funcdata = func_desc(addr); - entry->magic = STUB_MAGIC; - - return (unsigned long)entry; -} -#else -static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, - struct module *me, unsigned long addr) -{ - return stub_for_addr(sechdrs, addr, me); -} -#endif - int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs) { - mod->arch.tramp = create_ftrace_stub(sechdrs, mod, - (unsigned long)ftrace_caller); + mod->arch.tramp = stub_for_addr(sechdrs, + (unsigned long)ftrace_caller, + mod, + "ftrace_caller"); #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - mod->arch.tramp_regs = create_ftrace_stub(sechdrs, mod, - (unsigned long)ftrace_regs_caller); + mod->arch.tramp_regs = stub_for_addr(sechdrs, + (unsigned long)ftrace_regs_caller, + mod, + "ftrace_regs_caller"); if (!mod->arch.tramp_regs) return -ENOENT; #endif -- cgit v1.2.3-59-g8ed1b From 74016701fe5f873ae23bf02835407227138d874d Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Sat, 30 May 2020 17:16:33 +0000 Subject: powerpc/32s: Fix another build failure with CONFIG_PPC_KUAP_DEBUG 'thread' doesn't exist in kuap_check() macro. Use 'current' instead. 
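The corrected operand works because THREAD and KUAP are asm-offsets constants, offsetof(struct task_struct, thread) and offsetof(struct thread_struct, kuap) respectively, so THREAD + KUAP addresses current->thread.kuap, whereas the old KUAP(thread) form referenced a 'thread' symbol the macro never defines. A standalone illustration of the offset arithmetic with mock structures (illustrative only, not kernel code):

    #include <stdio.h>
    #include <stddef.h>

    /* Mock layouts standing in for the kernel's task_struct/thread_struct. */
    struct thread_struct { unsigned long ksp; unsigned long kuap; };
    struct task_struct { long state; struct thread_struct thread; };

    int main(void)
    {
    	/* In the real kernel these are generated by asm-offsets.c. */
    	size_t THREAD = offsetof(struct task_struct, thread);
    	size_t KUAP = offsetof(struct thread_struct, kuap);

    	/* "lwz \gpr, THREAD + KUAP(\current)" loads from this offset. */
    	printf("thread.kuap lives at byte offset %zu\n", THREAD + KUAP);
    	return 0;
    }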
Fixes: a68c31fc01ef ("powerpc/32s: Implement Kernel Userspace Access Protection") Cc: stable@vger.kernel.org Reported-by: kbuild test robot Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/b459e1600b969047a74e34251a84a3d6fdf1f312.1590858925.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/book3s/32/kup.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index 5a267b7e4971..32fd4452e960 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -2,6 +2,7 @@ #ifndef _ASM_POWERPC_BOOK3S_32_KUP_H #define _ASM_POWERPC_BOOK3S_32_KUP_H +#include #include #ifdef __ASSEMBLY__ @@ -75,7 +76,7 @@ .macro kuap_check current, gpr #ifdef CONFIG_PPC_KUAP_DEBUG - lwz \gpr, KUAP(thread) + lwz \gpr, THREAD + KUAP(\current) 999: twnei \gpr, 0 EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) #endif -- cgit v1.2.3-59-g8ed1b From 0828137e8f16721842468e33df0460044a0c588b Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 28 May 2020 00:58:40 +1000 Subject: powerpc/64s: Don't init FSCR_DSCR in __init_FSCR() __init_FSCR() was added originally in commit 2468dcf641e4 ("powerpc: Add support for context switching the TAR register") (Feb 2013), and only set FSCR_TAR. At that point FSCR (Facility Status and Control Register) was not context switched, so the setting was permanent after boot. Later we added initialisation of FSCR_DSCR to __init_FSCR(), in commit 54c9b2253d34 ("powerpc: Set DSCR bit in FSCR setup") (Mar 2013), again that was permanent after boot. Then commit 2517617e0de6 ("powerpc: Fix context switch DSCR on POWER8") (Aug 2013) added a limited context switch of FSCR, just the FSCR_DSCR bit was context switched based on thread.dscr_inherit. That commit said "This clears the H/FSCR DSCR bit initially", but it didn't, it left the initialisation of FSCR_DSCR in __init_FSCR(). However the initial context switch from init_task to pid 1 would clear FSCR_DSCR because thread.dscr_inherit was 0. That commit also introduced the requirement that FSCR_DSCR be clear for user processes, so that we can take the facility unavailable interrupt in order to manage dscr_inherit. Then in commit 152d523e6307 ("powerpc: Create context switch helpers save_sprs() and restore_sprs()") (Dec 2015) FSCR was added to thread_struct. However it still wasn't fully context switched, we just took the existing value and set FSCR_DSCR if the new thread had dscr_inherit set. FSCR was still initialised at boot to FSCR_DSCR | FSCR_TAR, but that value was not propagated into the thread_struct, so the initial context switch set FSCR_DSCR back to 0. Finally commit b57bd2de8c6c ("powerpc: Improve FSCR init and context switching") (Jun 2016) added a full context switch of the FSCR, and added an initialisation of init_task.thread.fscr to FSCR_TAR | FSCR_EBB, but omitted FSCR_DSCR. The end result is that swapper runs with FSCR_DSCR set because of the initialisation in __init_FSCR(), but no other processes do, they use the value from init_task.thread.fscr. Having FSCR_DSCR set for swapper allows it to access SPR 3 from userspace, but swapper never runs userspace, so it has no useful effect. It's also confusing to have the value initialised in two places to two different values. 
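To make that concrete, the two competing initialisations described above boil down to the following (a sketch of the effect, not actual kernel code):

    /*
     *   __init_FSCR():  FSCR SPR |= FSCR_TAR | FSCR_DSCR | FSCR_EBB
     *   INIT_THREAD:    init_task.thread.fscr = FSCR_TAR | FSCR_EBB
     *
     * swapper keeps the SPR value, so it alone runs with FSCR_DSCR set;
     * every other task is switched to init_task.thread.fscr, in which
     * FSCR_DSCR is clear.
     */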
So remove FSCR_DSCR from __init_FSCR(), this at least gets us to the point where there's a single value of FSCR, even if it's still set in two places. Signed-off-by: Michael Ellerman Tested-by: Alistair Popple Link: https://lore.kernel.org/r/20200527145843.2761782-1-mpe@ellerman.id.au --- arch/powerpc/kernel/cpu_setup_power.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index a460298c7ddb..f91ecb10d0ae 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -184,7 +184,7 @@ __init_LPCR_ISA300: __init_FSCR: mfspr r3,SPRN_FSCR - ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB + ori r3,r3,FSCR_TAR|FSCR_EBB mtspr SPRN_FSCR,r3 blr -- cgit v1.2.3-59-g8ed1b From 993e3d96fd08c3ebf7566e43be9b8cd622063e6d Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 28 May 2020 00:58:41 +1000 Subject: powerpc/64s: Don't let DT CPU features set FSCR_DSCR The device tree CPU features binding includes FSCR bit numbers which Linux is instructed to set by firmware. Whether that's a good idea or not, in the case of the DSCR the Linux implementation has a hard requirement that the FSCR_DSCR bit not be set by default. We use it to track when a process reads/writes to DSCR, so it must be clear to begin with. So if firmware tells us to set FSCR_DSCR we must ignore it. Currently this does not cause a bug in our DSCR handling because the value of FSCR that the device tree CPU features code establishes is only used by swapper. All other tasks use the value hard coded in init_task.thread.fscr. However we'd like to fix that in a future commit, at which point this will become necessary. Fixes: 5a61ef74f269 ("powerpc/64s: Support new device tree binding for discovering CPU features") Cc: stable@vger.kernel.org # v4.12+ Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200527145843.2761782-2-mpe@ellerman.id.au --- arch/powerpc/kernel/dt_cpu_ftrs.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 36bc0d5c4f3a..fca4d7ff22b9 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -346,6 +346,14 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f) { u64 lpcr; + /* + * Linux relies on FSCR[DSCR] being clear, so that we can take the + * facility unavailable interrupt and track the task's usage of DSCR. + * See facility_unavailable_exception(). + * Clear the bit here so that feat_enable() doesn't set it. + */ + f->fscr_bit_nr = -1; + feat_enable(f); lpcr = mfspr(SPRN_LPCR); -- cgit v1.2.3-59-g8ed1b From 912c0a7f2b5daa3cbb2bc10f303981e493de73bd Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 28 May 2020 00:58:42 +1000 Subject: powerpc/64s: Save FSCR to init_task.thread.fscr after feature init At boot the FSCR is initialised via one of two paths. On most systems it's set to a hard coded value in __init_FSCR(). On newer skiboot systems we use the device tree CPU features binding, where firmware can tell Linux what bits to set in FSCR (and HFSCR). In both cases the value that's configured at boot is not propagated into the init_task.thread.fscr value prior to the initial fork of init (pid 1), which means the value is not used by any processes other than swapper (the idle task). For the __init_FSCR() case this is OK, because the value in init_task.thread.fscr is initialised to something sensible. 
However it does mean that the value set in __init_FSCR() is not used other than for swapper, which is odd and confusing. The bigger problem is for the device tree CPU features case it prevents firmware from setting (or clearing) FSCR bits for use by user space. This means all existing kernels can not have features enabled/disabled by firmware if those features require setting/clearing FSCR bits. We can handle both cases by saving the FSCR value into init_task.thread.fscr after we have initialised it at boot. This fixes the bug for device tree CPU features, and will allow us to simplify the initialisation for the __init_FSCR() case in a future patch. Fixes: 5a61ef74f269 ("powerpc/64s: Support new device tree binding for discovering CPU features") Cc: stable@vger.kernel.org # v4.12+ Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200527145843.2761782-3-mpe@ellerman.id.au --- arch/powerpc/kernel/prom.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 9a651366d385..6a3bac357e24 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -690,6 +690,23 @@ static void __init tm_init(void) static void tm_init(void) { } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ +#ifdef CONFIG_PPC64 +static void __init save_fscr_to_task(void) +{ + /* + * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we + * have configured via the device tree features or via __init_FSCR(). + * That value will then be propagated to pid 1 (init) and all future + * processes. + */ + if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) + init_task.thread.fscr = mfspr(SPRN_FSCR); +} +#else +static inline void save_fscr_to_task(void) {}; +#endif + + void __init early_init_devtree(void *params) { phys_addr_t limit; @@ -778,6 +795,8 @@ void __init early_init_devtree(void *params) BUG(); } + save_fscr_to_task(); + #if defined(CONFIG_SMP) && defined(CONFIG_PPC64) /* We'll later wait for secondaries to check in; there are * NCPUS-1 non-boot CPUs :-) -- cgit v1.2.3-59-g8ed1b From c887ef5707591e84f80271e95e99ff9fb38987b5 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 28 May 2020 00:58:43 +1000 Subject: powerpc/64s: Don't set FSCR bits in INIT_THREAD Since the previous commit that saves the value of FSCR configured at boot into init_task.thread.fscr, the static initialisation in INIT_THREAD now no longer has any effect. So remove it. For non DT CPU features, the end result is the same, because __init_FSCR() is called on all CPUs that have an FSCR (Power8, Power9), and it sets FSCR_TAR & FSCR_EBB. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200527145843.2761782-4-mpe@ellerman.id.au --- arch/powerpc/include/asm/processor.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 4e53df163b92..52a67835057a 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -302,7 +302,6 @@ struct thread_struct { .ksp = INIT_SP, \ .addr_limit = KERNEL_DS, \ .fpexc_mode = 0, \ - .fscr = FSCR_TAR | FSCR_EBB \ } #endif -- cgit v1.2.3-59-g8ed1b From ee988c11acf6f9464b7b44e9a091bf6afb3b3a49 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Thu, 21 May 2020 11:43:35 +1000 Subject: powerpc: Add new HWCAP bits POWER10 introduces two new architectural features - ISAv3.1 and matrix multiply assist (MMA) instructions. 
Userspace detects the presence of these features via two HWCAP bits introduced in this patch. These bits have been agreed to by the compiler and binutils team. According to ISAv3.1 MMA is an optional feature and software that makes use of it should first check for availability via this HWCAP bit and use alternate code paths if unavailable. Signed-off-by: Alistair Popple Tested-by: Michael Neuling Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200521014341.29095-2-alistair@popple.id.au --- arch/powerpc/include/uapi/asm/cputable.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h index 540592034740..731b97dc2d15 100644 --- a/arch/powerpc/include/uapi/asm/cputable.h +++ b/arch/powerpc/include/uapi/asm/cputable.h @@ -50,6 +50,8 @@ #define PPC_FEATURE2_DARN 0x00200000 /* darn random number insn */ #define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */ #define PPC_FEATURE2_HTM_NO_SUSPEND 0x00080000 /* TM w/out suspended state */ +#define PPC_FEATURE2_ARCH_3_1 0x00040000 /* ISA 3.1 */ +#define PPC_FEATURE2_MMA 0x00020000 /* Matrix Multiply Assist */ /* * IMPORTANT! -- cgit v1.2.3-59-g8ed1b From 3fd5836ee801ab9ac5b314c26550e209bafa5eaa Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Thu, 21 May 2020 11:43:36 +1000 Subject: powerpc: Add support for ISA v3.1 Newer ISA versions are enabled by clearing all bits in the PCR associated with previous versions of the ISA. Enable ISA v3.1 support by updating the PCR mask to include ISA v3.0. This ensures all PCR bits corresponding to earlier architecture versions get cleared thereby enabling ISA v3.1 if supported by the hardware. Signed-off-by: Alistair Popple Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200521014341.29095-3-alistair@popple.id.au --- arch/powerpc/include/asm/cputable.h | 1 + arch/powerpc/include/asm/reg.h | 3 ++- arch/powerpc/kvm/book3s_hv.c | 3 --- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index c67b94f3334c..1559dbf72842 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -213,6 +213,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_P9_TIDR LONG_ASM_CONST(0x0000800000000000) #define CPU_FTR_P9_TLBIE_ERAT_BUG LONG_ASM_CONST(0x0001000000000000) #define CPU_FTR_P9_RADIX_PREFETCH_BUG LONG_ASM_CONST(0x0002000000000000) +#define CPU_FTR_ARCH_31 LONG_ASM_CONST(0x0004000000000000) #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 054f8a71d686..dd20af367b57 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -487,10 +487,11 @@ * determine both the compatibility level which we want to emulate and the * compatibility level which the host is capable of emulating. 
*/ +#define PCR_ARCH_300 0x10 /* Architecture 3.00 */ #define PCR_ARCH_207 0x8 /* Architecture 2.07 */ #define PCR_ARCH_206 0x4 /* Architecture 2.06 */ #define PCR_ARCH_205 0x2 /* Architecture 2.05 */ -#define PCR_LOW_BITS (PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205) +#define PCR_LOW_BITS (PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205 | PCR_ARCH_300) #define PCR_MASK ~(PCR_HIGH_BITS | PCR_LOW_BITS) /* PCR Reserved Bits */ #define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */ #define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index db07199f0977..a0cf17597838 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -344,9 +344,6 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) vcpu->arch.pvr = pvr; } -/* Dummy value used in computing PCR value below */ -#define PCR_ARCH_300 (PCR_ARCH_207 << 1) - static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) { unsigned long host_pcr_bit = 0, guest_pcr_bit = 0; -- cgit v1.2.3-59-g8ed1b From 43d0d37acbe40a9a93d9891ca670638cd22116b1 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Thu, 21 May 2020 11:43:37 +1000 Subject: powerpc/dt_cpu_ftrs: Advertise support for ISA v3.1 if selected On powernv hardware support for ISAv3.1 is advertised via a cpu feature bit in the device tree. This patch enables the associated HWCAP bit if the device tree indicates ISAv3.1 is available. Signed-off-by: Alistair Popple Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200521014341.29095-4-alistair@popple.id.au --- arch/powerpc/kernel/dt_cpu_ftrs.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index fca4d7ff22b9..46a85584e20b 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -26,6 +26,7 @@ /* Device-tree visible constants follow */ #define ISA_V2_07B 2070 #define ISA_V3_0B 3000 +#define ISA_V3_1 3100 #define USABLE_PR (1U << 0) #define USABLE_OS (1U << 1) @@ -662,6 +663,11 @@ static void __init cpufeatures_setup_start(u32 isa) cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300; cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00; } + + if (isa >= 3100) { + cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_31; + cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_1; + } } static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f) -- cgit v1.2.3-59-g8ed1b From c63d688c3dabca973c5a7da73d17422ad13f3737 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Thu, 21 May 2020 11:43:39 +1000 Subject: powerpc/dt_cpu_ftrs: Enable Prefixed Instructions Prefix instructions have their own FSCR bit which needs to be enabled via a CPU feature. The kernel will save the FSCR for problem state but it needs to be enabled initially. 
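The required enablement is a plain read-modify-write of the FSCR; a hedged C rendering of what the __init_FSCR_power10 assembly added later in this series does (the helper name here is hypothetical):

    /* Hypothetical C equivalent of the SPR update (sketch only). */
    static void __init enable_prefix_facility(void)
    {
    	u64 fscr = mfspr(SPRN_FSCR);

    	mtspr(SPRN_FSCR, fscr | FSCR_PREFIX);
    }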
Signed-off-by: Alistair Popple
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200521014341.29095-6-alistair@popple.id.au
---
 arch/powerpc/kernel/dt_cpu_ftrs.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 46a85584e20b..87a5d47415eb 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -634,6 +634,7 @@ static struct dt_cpu_feature_match __initdata
 	{"vector-binary128", feat_enable, 0},
 	{"vector-binary16", feat_enable, 0},
 	{"wait-v3", feat_enable, 0},
+	{"prefix-instructions", feat_enable, 0},
 };
-- cgit v1.2.3-59-g8ed1b

From 87939d50e5888bd78478d9aa9455f56b919df658 Mon Sep 17 00:00:00 2001
From: Alistair Popple
Date: Thu, 21 May 2020 11:43:40 +1000
Subject: powerpc/dt_cpu_ftrs: Add MMA feature

Matrix multiply assist (MMA) is a new feature added to ISAv3.1 and POWER10. Support on powernv can be selected via a firmware CPU device tree feature which enables it via a PCR bit.

Signed-off-by: Alistair Popple
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200521014341.29095-7-alistair@popple.id.au
---
 arch/powerpc/include/asm/reg.h    |  3 ++-
 arch/powerpc/kernel/dt_cpu_ftrs.c | 17 ++++++++++++++++-
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index dd20af367b57..88e6c78100d9 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -481,7 +481,8 @@
 #define   PCR_VEC_DIS	(__MASK(63-0))	/* Vec. disable (bit NA since POWER8) */
 #define   PCR_VSX_DIS	(__MASK(63-1))	/* VSX disable (bit NA since POWER8) */
 #define   PCR_TM_DIS	(__MASK(63-2))	/* Trans. memory disable (POWER8) */
-#define   PCR_HIGH_BITS	(PCR_VEC_DIS | PCR_VSX_DIS | PCR_TM_DIS)
+#define   PCR_MMA_DIS	(__MASK(63-3))	/* Matrix-Multiply Accelerator */
+#define   PCR_HIGH_BITS	(PCR_MMA_DIS | PCR_VEC_DIS | PCR_VSX_DIS | PCR_TM_DIS)
 /*
  * These bits are used in the function kvmppc_set_arch_compat() to specify and
  * determine both the compatibility level which we want to emulate and the
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 87a5d47415eb..3a409517c031 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -75,6 +75,7 @@ static struct {
 	u64 lpcr_clear;
 	u64 hfscr;
 	u64 fscr;
+	u64 pcr;
 } system_registers;
 
 static void (*init_pmu_registers)(void);
@@ -102,7 +103,7 @@ static void __restore_cpu_cpufeatures(void)
 	if (hv_mode) {
 		mtspr(SPRN_LPID, 0);
 		mtspr(SPRN_HFSCR, system_registers.hfscr);
-		mtspr(SPRN_PCR, PCR_MASK);
+		mtspr(SPRN_PCR, system_registers.pcr);
 	}
 	mtspr(SPRN_FSCR, system_registers.fscr);
@@ -561,6 +562,18 @@ static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
 	return 1;
 }
 
+static int __init feat_enable_mma(struct dt_cpu_feature *f)
+{
+	u64 pcr;
+
+	feat_enable(f);
+	pcr = mfspr(SPRN_PCR);
+	pcr &= ~PCR_MMA_DIS;
+	mtspr(SPRN_PCR, pcr);
+
+	return 1;
+}
+
 struct dt_cpu_feature_match {
 	const char *name;
 	int (*enable)(struct dt_cpu_feature *f);
@@ -635,6 +648,7 @@ static struct dt_cpu_feature_match __initdata
 	{"vector-binary16", feat_enable, 0},
 	{"wait-v3", feat_enable, 0},
 	{"prefix-instructions", feat_enable, 0},
+	{"matrix-multiply-assist", feat_enable_mma, 0},
 };
 
 static bool __initdata using_dt_cpu_ftrs;
@@ -785,6 +799,7 @@ static void __init cpufeatures_setup_finished(void)
 	system_registers.lpcr = mfspr(SPRN_LPCR);
 	system_registers.hfscr = mfspr(SPRN_HFSCR);
system_registers.fscr = mfspr(SPRN_FSCR); + system_registers.pcr = mfspr(SPRN_PCR); pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n", cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features); -- cgit v1.2.3-59-g8ed1b From a3ea40d5c7365e7e5c7c85b6f30b15142b397571 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Thu, 21 May 2020 11:43:41 +1000 Subject: powerpc: Add POWER10 architected mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PVR value of 0x0F000006 means we are arch v3.1 compliant (i.e. POWER10). This is used by phyp and kvm when booting as a pseries guest to detect the presence of new P10 features and to enable the appropriate hwcap and facility bits. Signed-off-by: Alistair Popple Signed-off-by: Cédric Le Goater [mpe: Fall through to __init_FSCR rather than duplicating it, drop hack to set current->thread.fscr now that is handled elsewhere.] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200521014341.29095-8-alistair@popple.id.au --- arch/powerpc/include/asm/cputable.h | 15 +++++++++++++-- arch/powerpc/include/asm/mmu.h | 1 + arch/powerpc/include/asm/prom.h | 1 + arch/powerpc/kernel/cpu_setup_power.S | 20 ++++++++++++++++++-- arch/powerpc/kernel/cputable.c | 22 ++++++++++++++++++++++ arch/powerpc/kernel/prom_init.c | 12 ++++++++++-- 6 files changed, 65 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 1559dbf72842..bac2252c839e 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -468,6 +468,17 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \ CPU_FTR_P9_TM_HV_ASSIST | \ CPU_FTR_P9_TM_XER_SO_BUG) +#define CPU_FTRS_POWER10 (CPU_FTR_LWSYNC | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ + CPU_FTR_MMCRA | CPU_FTR_SMT | \ + CPU_FTR_COHERENT_ICACHE | \ + CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ + CPU_FTR_DSCR | CPU_FTR_SAO | \ + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ + CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ + CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ + CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \ + CPU_FTR_ARCH_31) #define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -486,14 +497,14 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTRS_POSSIBLE \ (CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | CPU_FTRS_POWER9 | \ - CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2) + CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | CPU_FTRS_POWER10) #else #define CPU_FTRS_POSSIBLE \ (CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \ CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \ CPU_FTRS_POWER8 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | CPU_FTRS_POWER9 | \ - CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2) + CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | CPU_FTRS_POWER10) #endif /* CONFIG_CPU_LITTLE_ENDIAN */ #endif #else diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index cf2a08bfd5cd..f4ac25d4df05 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -122,6 +122,7 @@ #define MMU_FTRS_POWER7 MMU_FTRS_POWER6 #define MMU_FTRS_POWER8 MMU_FTRS_POWER6 #define MMU_FTRS_POWER9 MMU_FTRS_POWER6 +#define MMU_FTRS_POWER10 
MMU_FTRS_POWER6 #define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ MMU_FTR_CI_LARGE_PAGE #define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 94e3fd54f2c8..324a13351749 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -117,6 +117,7 @@ extern int of_read_drc_info_cell(struct property **prop, #define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */ #define OV1_PPC_3_00 0x80 /* set if we support PowerPC 3.00 */ +#define OV1_PPC_3_1 0x40 /* set if we support PowerPC 3.1 */ /* Option vector 2: Open Firmware options supported */ #define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */ diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index f91ecb10d0ae..efdcfa714106 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -91,10 +91,15 @@ _GLOBAL(__restore_cpu_power8) mtlr r11 blr +_GLOBAL(__setup_cpu_power10) + mflr r11 + bl __init_FSCR_power10 + b 1f + _GLOBAL(__setup_cpu_power9) mflr r11 bl __init_FSCR - bl __init_PMU +1: bl __init_PMU bl __init_hvmode_206 mtlr r11 beqlr @@ -116,10 +121,15 @@ _GLOBAL(__setup_cpu_power9) mtlr r11 blr +_GLOBAL(__restore_cpu_power10) + mflr r11 + bl __init_FSCR_power10 + b 1f + _GLOBAL(__restore_cpu_power9) mflr r11 bl __init_FSCR - bl __init_PMU +1: bl __init_PMU mfmsr r3 rldicl. r0,r3,4,63 mtlr r11 @@ -182,6 +192,12 @@ __init_LPCR_ISA300: isync blr +__init_FSCR_power10: + mfspr r3, SPRN_FSCR + ori r3, r3, FSCR_PREFIX + mtspr SPRN_FSCR, r3 + // fall through + __init_FSCR: mfspr r3,SPRN_FSCR ori r3,r3,FSCR_TAR|FSCR_EBB diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 8ed553734919..b4066354f073 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -70,6 +70,8 @@ extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_power8(void); extern void __setup_cpu_power9(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_power9(void); +extern void __setup_cpu_power10(unsigned long offset, struct cpu_spec* spec); +extern void __restore_cpu_power10(void); extern long __machine_check_early_realmode_p7(struct pt_regs *regs); extern long __machine_check_early_realmode_p8(struct pt_regs *regs); extern long __machine_check_early_realmode_p9(struct pt_regs *regs); @@ -119,6 +121,10 @@ extern void __restore_cpu_e6500(void); PPC_FEATURE2_ARCH_3_00 | \ PPC_FEATURE2_HAS_IEEE128 | \ PPC_FEATURE2_DARN ) +#define COMMON_USER_POWER10 COMMON_USER_POWER9 +#define COMMON_USER2_POWER10 (COMMON_USER2_POWER9 | \ + PPC_FEATURE2_ARCH_3_1 | \ + PPC_FEATURE2_MMA) #ifdef CONFIG_PPC_BOOK3E_64 #define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE) @@ -367,6 +373,22 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_restore = __restore_cpu_power9, .platform = "power9", }, + { /* 3.1-compliant processor, i.e. 
Power10 "architected" mode */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000006, + .cpu_name = "POWER10 (architected)", + .cpu_features = CPU_FTRS_POWER10, + .cpu_user_features = COMMON_USER_POWER10, + .cpu_user_features2 = COMMON_USER2_POWER10, + .mmu_features = MMU_FTRS_POWER10, + .icache_bsize = 128, + .dcache_bsize = 128, + .oprofile_type = PPC_OPROFILE_INVALID, + .oprofile_cpu_type = "ppc64/ibm-compat-v1", + .cpu_setup = __setup_cpu_power10, + .cpu_restore = __restore_cpu_power10, + .platform = "power10", + }, { /* Power7 */ .pvr_mask = 0xffff0000, .pvr_value = 0x003f0000, diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index e3a9fde51c4f..5f15b10eb007 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -920,7 +920,7 @@ struct option_vector6 { } __packed; struct ibm_arch_vec { - struct { u32 mask, val; } pvrs[12]; + struct { u32 mask, val; } pvrs[14]; u8 num_vectors; @@ -973,6 +973,14 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = { .mask = cpu_to_be32(0xffff0000), /* POWER9 */ .val = cpu_to_be32(0x004e0000), }, + { + .mask = cpu_to_be32(0xffff0000), /* POWER10 */ + .val = cpu_to_be32(0x00800000), + }, + { + .mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */ + .val = cpu_to_be32(0x0f000006), + }, { .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */ .val = cpu_to_be32(0x0f000005), @@ -1002,7 +1010,7 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = { .byte1 = 0, .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, - .arch_versions3 = OV1_PPC_3_00, + .arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1, }, .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)), -- cgit v1.2.3-59-g8ed1b From f44b85da5e7450d0308695ba6f503d75fe6cc166 Mon Sep 17 00:00:00 2001 From: Andrew Donnellan Date: Tue, 2 Jun 2020 14:03:41 +1000 Subject: cxl: Remove dead Kconfig options The CXL_AFU_DRIVER_OPS and CXL_LIB Kconfig options were added to coordinate merging of new features. They no longer serve any purpose, so remove them. Signed-off-by: Andrew Donnellan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200602040341.10152-1-ajd@linux.ibm.com --- drivers/misc/cxl/Kconfig | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig index 39eec9031487..51aecafdcbdf 100644 --- a/drivers/misc/cxl/Kconfig +++ b/drivers/misc/cxl/Kconfig @@ -7,18 +7,10 @@ config CXL_BASE bool select PPC_COPRO_BASE -config CXL_AFU_DRIVER_OPS - bool - -config CXL_LIB - bool - config CXL tristate "Support for IBM Coherent Accelerators (CXL)" depends on PPC_POWERNV && PCI_MSI && EEH select CXL_BASE - select CXL_AFU_DRIVER_OPS - select CXL_LIB default m help Select this option to enable driver support for IBM Coherent -- cgit v1.2.3-59-g8ed1b From 4336b9337824a60a0b10013c622caeee99460db5 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Tue, 21 Apr 2020 18:15:39 +1000 Subject: powerpc/pseries: Make vio and ibmebus initcalls pseries specific The vio and ibmebus buses are used for pseries specific paravirtualised devices and currently they're initialised by the generic initcall types. This is mostly fine, but it can result in some nuisance errors in dmesg when booting on PowerNV on some OSes, e.g. 
[ 2.984439] synth uevent: /devices/vio: failed to send uevent [ 2.984442] vio vio: uevent: failed to send synthetic uevent [ 17.968551] synth uevent: /devices/vio: failed to send uevent [ 17.968554] vio vio: uevent: failed to send synthetic uevent We don't see anything similar for the ibmebus because that depends on !CONFIG_LITTLE_ENDIAN. This patch squashes those by switching to using machine_*_initcall() so the bus type is only registered when the kernel is running on a pseries machine. Signed-off-by: Oliver O'Halloran Reviewed-by: Tyrel Datwyler Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200421081539.7485-1-oohall@gmail.com --- arch/powerpc/platforms/pseries/ibmebus.c | 3 ++- arch/powerpc/platforms/pseries/vio.c | 7 ++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index b91eb0929ed1..a6f101c958e8 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -47,6 +47,7 @@ #include #include #include +#include static struct device ibmebus_bus_device = { /* fake "parent" device */ .init_name = "ibmebus", @@ -464,4 +465,4 @@ static int __init ibmebus_bus_init(void) return 0; } -postcore_initcall(ibmebus_bus_init); +machine_postcore_initcall(pseries, ibmebus_bus_init); diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 37f1f25ba804..0487b26f6f1a 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -31,6 +31,7 @@ #include #include #include +#include static struct vio_dev vio_bus_device = { /* fake "parent" device */ .name = "vio", @@ -1513,7 +1514,7 @@ static int __init vio_bus_init(void) return 0; } -postcore_initcall(vio_bus_init); +machine_postcore_initcall(pseries, vio_bus_init); static int __init vio_device_init(void) { @@ -1522,7 +1523,7 @@ static int __init vio_device_init(void) return 0; } -device_initcall(vio_device_init); +machine_device_initcall(pseries, vio_device_init); static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1703,4 +1704,4 @@ static int __init vio_init(void) dma_debug_add_bus(&vio_bus_type); return 0; } -fs_initcall(vio_init); +machine_fs_initcall(pseries, vio_init); -- cgit v1.2.3-59-g8ed1b
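For reference, the conversion pattern above generalises to any initcall that should only run on one machine type; a minimal sketch with a hypothetical bus (bus_register() and machine_postcore_initcall() are the existing kernel APIs, everything else is made up for illustration):

    static struct bus_type mybus_type = {	/* hypothetical example bus */
    	.name = "mybus",
    };

    static int __init mybus_init(void)
    {
    	return bus_register(&mybus_type);
    }
    /* Registered at postcore initcall time, but only on pseries machines. */
    machine_postcore_initcall(pseries, mybus_init);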