Diffstat (limited to 'arch/powerpc/platforms/pseries')
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile           |   1
-rw-r--r--  arch/powerpc/platforms/pseries/cmm.c              |   3
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c            |  67
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c              |   5
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c      |   7
-rw-r--r--  arch/powerpc/platforms/pseries/hvconsole.c        |  19
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c            |  11
-rw-r--r--  arch/powerpc/platforms/pseries/kexec.c            |   2
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c             |  12
-rw-r--r--  arch/powerpc/platforms/pseries/lparcfg.c          | 710
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c         |  45
-rw-r--r--  arch/powerpc/platforms/pseries/plpar_wrappers.h   | 324
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c   |  12
-rw-r--r--  arch/powerpc/platforms/pseries/pseries.h          |   5
-rw-r--r--  arch/powerpc/platforms/pseries/pseries_energy.c   |   4
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c            |   6
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c              |  20
17 files changed, 822 insertions, 431 deletions
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 8ae010381316..6c61ec5ee914 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_DTL) += dtl.o
obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
obj-$(CONFIG_PSERIES_IDLE) += processor_idle.o
+obj-$(CONFIG_LPARCFG) += lparcfg.o
ifeq ($(CONFIG_PPC_PSERIES),y)
obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index c638535753df..1e561bef459b 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -40,8 +40,7 @@
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <linux/memory.h>
-
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
#define CMM_DRIVER_VERSION "1.0.0"
#define CMM_DEFAULT_DELAY 1
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index a1a7b9a67ffd..7cfdaae1721a 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -63,26 +63,32 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
return prop;
}
-static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
+static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
+ const char *path)
{
struct device_node *dn;
char *name;
+ /* If the parent node path is "/", advance path to the NUL terminator to
+ * prevent double leading slashes in full_name.
+ */
+ if (!path[1])
+ path++;
+
dn = kzalloc(sizeof(*dn), GFP_KERNEL);
if (!dn)
return NULL;
- /* The configure connector reported name does not contain a
- * preceding '/', so we allocate a buffer large enough to
- * prepend this to the full_name.
- */
name = (char *)ccwa + ccwa->name_offset;
- dn->full_name = kasprintf(GFP_KERNEL, "/%s", name);
+ dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
if (!dn->full_name) {
kfree(dn);
return NULL;
}
+ of_node_set_flag(dn, OF_DYNAMIC);
+ kref_init(&dn->kref);
+
return dn;
}
@@ -120,7 +126,8 @@ void dlpar_free_cc_nodes(struct device_node *dn)
#define CALL_AGAIN -2
#define ERR_CFG_USE -9003
-struct device_node *dlpar_configure_connector(u32 drc_index)
+struct device_node *dlpar_configure_connector(u32 drc_index,
+ struct device_node *parent)
{
struct device_node *dn;
struct device_node *first_dn = NULL;
@@ -129,6 +136,7 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
struct property *last_property = NULL;
struct cc_workarea *ccwa;
char *data_buf;
+ const char *parent_path = parent->full_name;
int cc_token;
int rc = -1;
@@ -162,7 +170,7 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
break;
case NEXT_SIBLING:
- dn = dlpar_parse_cc_node(ccwa);
+ dn = dlpar_parse_cc_node(ccwa, parent_path);
if (!dn)
goto cc_error;
@@ -172,13 +180,17 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
break;
case NEXT_CHILD:
- dn = dlpar_parse_cc_node(ccwa);
+ if (first_dn)
+ parent_path = last_dn->full_name;
+
+ dn = dlpar_parse_cc_node(ccwa, parent_path);
if (!dn)
goto cc_error;
- if (!first_dn)
+ if (!first_dn) {
+ dn->parent = parent;
first_dn = dn;
- else {
+ } else {
dn->parent = last_dn;
if (last_dn)
last_dn->child = dn;
@@ -202,6 +214,7 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
case PREV_PARENT:
last_dn = last_dn->parent;
+ parent_path = last_dn->parent->full_name;
break;
case CALL_AGAIN:
@@ -256,8 +269,6 @@ int dlpar_attach_node(struct device_node *dn)
{
int rc;
- of_node_set_flag(dn, OF_DYNAMIC);
- kref_init(&dn->kref);
dn->parent = derive_parent(dn->full_name);
if (!dn->parent)
return -ENOMEM;
@@ -275,8 +286,15 @@ int dlpar_attach_node(struct device_node *dn)
int dlpar_detach_node(struct device_node *dn)
{
+ struct device_node *child;
int rc;
+ child = of_get_next_child(dn, NULL);
+ while (child) {
+ dlpar_detach_node(child);
+ child = of_get_next_child(dn, child);
+ }
+
rc = of_detach_node(dn);
if (rc)
return rc;
@@ -382,9 +400,8 @@ out:
static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
- struct device_node *dn;
+ struct device_node *dn, *parent;
unsigned long drc_index;
- char *cpu_name;
int rc;
cpu_hotplug_driver_lock();
@@ -394,25 +411,19 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
goto out;
}
- dn = dlpar_configure_connector(drc_index);
- if (!dn) {
- rc = -EINVAL;
+ parent = of_find_node_by_path("/cpus");
+ if (!parent) {
+ rc = -ENODEV;
goto out;
}
- /* configure-connector reports cpus as living in the base
- * directory of the device tree. CPUs actually live in the
- * cpus directory so we need to fixup the full_name.
- */
- cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);
- if (!cpu_name) {
- dlpar_free_cc_nodes(dn);
- rc = -ENOMEM;
+ dn = dlpar_configure_connector(drc_index, parent);
+ if (!dn) {
+ rc = -EINVAL;
goto out;
}
- kfree(dn->full_name);
- dn->full_name = cpu_name;
+ of_node_put(parent);
rc = dlpar_acquire_drc(drc_index);
if (rc) {
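
[Editorial note, not part of the patch: a minimal sketch of how the reworked
dlpar_parse_cc_node() now composes full_name from the parent path. The node
name used below is a made-up example, not taken from the patch.]

	#include <linux/kernel.h>	/* kasprintf() */
	#include <linux/slab.h>		/* GFP_KERNEL */

	static char *example_cc_full_name(void)
	{
		const char *path = "/cpus";             /* parent->full_name */
		const char *name = "PowerPC,POWER7@10"; /* string at ccwa->name_offset (hypothetical) */

		/* A parent path of "/" is advanced to its NUL terminator so
		 * the result is "/name" rather than "//name".
		 */
		if (!path[1])
			path++;

		/* Yields "/cpus/PowerPC,POWER7@10"; the old code produced
		 * "/PowerPC,POWER7@10" and dlpar_cpu_probe() then had to
		 * prepend "/cpus" by hand.
		 */
		return kasprintf(GFP_KERNEL, "%s/%s", path, name);
	}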
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 0cc0ac07a55d..5db66f1fbc26 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -29,8 +29,7 @@
#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/debug.h>
-
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
struct dtl {
struct dtl_entry *buf;
@@ -87,7 +86,7 @@ static void consume_dtle(struct dtl_entry *dtle, u64 index)
barrier();
/* check for hypervisor ring buffer overflow, ignore this entry if so */
- if (index + N_DISPATCH_LOG < vpa->dtl_idx)
+ if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
return;
++wp;
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 217ca5c75b20..82789e79e539 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -30,7 +30,8 @@
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
+
#include "offline_states.h"
/* This version can't take the spinlock, because it never returns */
@@ -123,7 +124,7 @@ static void pseries_mach_cpu_die(void)
cede_latency_hint = 2;
get_lppaca()->idle = 1;
- if (!get_lppaca()->shared_proc)
+ if (!lppaca_shared_proc(get_lppaca()))
get_lppaca()->donate_dedicated_cpu = 1;
while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
@@ -137,7 +138,7 @@ static void pseries_mach_cpu_die(void)
local_irq_disable();
- if (!get_lppaca()->shared_proc)
+ if (!lppaca_shared_proc(get_lppaca()))
get_lppaca()->donate_dedicated_cpu = 0;
get_lppaca()->idle = 0;
diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
index b344f94b0400..849b29b3e9ae 100644
--- a/arch/powerpc/platforms/pseries/hvconsole.c
+++ b/arch/powerpc/platforms/pseries/hvconsole.c
@@ -28,7 +28,7 @@
#include <linux/errno.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
/**
* hvc_get_chars - retrieve characters from firmware for denoted vterm adapter
@@ -40,10 +40,16 @@
*/
int hvc_get_chars(uint32_t vtermno, char *buf, int count)
{
- unsigned long got;
+ long ret;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ unsigned long *lbuf = (unsigned long *)buf;
+
+ ret = plpar_hcall(H_GET_TERM_CHAR, retbuf, vtermno);
+ lbuf[0] = be64_to_cpu(retbuf[1]);
+ lbuf[1] = be64_to_cpu(retbuf[2]);
- if (plpar_get_term_char(vtermno, &got, buf) == H_SUCCESS)
- return got;
+ if (ret == H_SUCCESS)
+ return retbuf[0];
return 0;
}
@@ -69,8 +75,9 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
if (count > MAX_VIO_PUT_CHARS)
count = MAX_VIO_PUT_CHARS;
- ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count, lbuf[0],
- lbuf[1]);
+ ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count,
+ cpu_to_be64(lbuf[0]),
+ cpu_to_be64(lbuf[1]));
if (ret == H_SUCCESS)
return count;
if (ret == H_BUSY)
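
[Editorial note, not part of the patch: the reason for the byte swaps above,
written out as a comment. That the hypervisor left-justifies the characters,
first character in the most significant byte of the register, is an assumption
about the PAPR H_GET_TERM_CHAR/H_PUT_TERM_CHAR convention, not something
stated in this hunk.]

	/*
	 * Assuming H_GET_TERM_CHAR returns the characters left-justified,
	 * e.g. retbuf[1] == 0x6162630000000000 for "abc", then
	 *
	 *	lbuf[0] = be64_to_cpu(retbuf[1]);
	 *
	 * stores 'a' at buf[0], 'b' at buf[1], 'c' at buf[2] on both
	 * big- and little-endian kernels (the swap is a no-op on BE).
	 * hvc_put_chars() does the inverse with cpu_to_be64() so the
	 * characters leave memory in the same register layout.
	 */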
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 23fc1dcf4434..0307901e4132 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -48,8 +48,7 @@
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>
-
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
@@ -530,7 +529,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
static void iommu_table_setparms_lpar(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl,
- const void *dma_window)
+ const __be32 *dma_window)
{
unsigned long offset, size;
@@ -630,7 +629,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
struct iommu_table *tbl;
struct device_node *dn, *pdn;
struct pci_dn *ppci;
- const void *dma_window = NULL;
+ const __be32 *dma_window = NULL;
dn = pci_bus_to_OF_node(bus);
@@ -1152,7 +1151,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
struct device_node *pdn, *dn;
struct iommu_table *tbl;
- const void *dma_window = NULL;
+ const __be32 *dma_window = NULL;
struct pci_dn *pci;
pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
@@ -1201,7 +1200,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
bool ddw_enabled = false;
struct device_node *pdn, *dn;
struct pci_dev *pdev;
- const void *dma_window = NULL;
+ const __be32 *dma_window = NULL;
u64 dma_offset;
if (!dev->dma_mask)
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 7d94bdc63d50..13fa95b3aa8b 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -17,9 +17,9 @@
#include <asm/mpic.h>
#include <asm/xics.h>
#include <asm/smp.h>
+#include <asm/plpar_wrappers.h>
#include "pseries.h"
-#include "plpar_wrappers.h"
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8bad880bd177..356bc75ca74f 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -41,8 +41,8 @@
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
+#include <asm/plpar_wrappers.h>
-#include "plpar_wrappers.h"
#include "pseries.h"
/* Flag bits for H_BULK_REMOVE */
@@ -68,6 +68,12 @@ void vpa_init(int cpu)
struct paca_struct *pp;
struct dtl_entry *dtl;
+ /*
+ * The spec says it "may be problematic" if CPU x registers the VPA of
+ * CPU y. We should never do that, but wail if we ever do.
+ */
+ WARN_ON(cpu != smp_processor_id());
+
if (cpu_has_feature(CPU_FTR_ALTIVEC))
lppaca_of(cpu).vmxregs_in_use = 1;
@@ -106,7 +112,7 @@ void vpa_init(int cpu)
lppaca_of(cpu).dtl_idx = 0;
/* hypervisor reads buffer length from this field */
- dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
+ dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
ret = register_dtl(hwcpu, __pa(dtl));
if (ret)
pr_err("WARNING: DTL registration of cpu %d (hw %d) "
@@ -724,7 +730,7 @@ int h_get_mpp(struct hvcall_mpp_data *mpp_data)
mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
- mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
+ mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;
mpp_data->pool_size = retbuf[4];
mpp_data->loan_request = retbuf[5];
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
new file mode 100644
index 000000000000..e738007eae64
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -0,0 +1,710 @@
+/*
+ * PowerPC64 LPAR Configuration Information Driver
+ *
+ * Dave Engebretsen engebret@us.ibm.com
+ * Copyright (c) 2003 Dave Engebretsen
+ * Will Schmidt willschm@us.ibm.com
+ * SPLPAR updates, Copyright (c) 2003 Will Schmidt IBM Corporation.
+ * seq_file updates, Copyright (c) 2004 Will Schmidt IBM Corporation.
+ * Nathan Lynch nathanl@austin.ibm.com
+ * Added lparcfg_write, Copyright (C) 2004 Nathan Lynch IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This driver creates a proc file at /proc/ppc64/lparcfg which contains
+ * keyword - value pairs that specify the configuration of the partition.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <asm/lppaca.h>
+#include <asm/hvcall.h>
+#include <asm/firmware.h>
+#include <asm/rtas.h>
+#include <asm/time.h>
+#include <asm/prom.h>
+#include <asm/vdso_datapage.h>
+#include <asm/vio.h>
+#include <asm/mmu.h>
+#include <asm/machdep.h>
+
+
+/*
+ * This isn't a module, but we expose it to userspace
+ * via /proc, so leave the definitions here.
+ */
+#define MODULE_VERS "1.9"
+#define MODULE_NAME "lparcfg"
+
+/* #define LPARCFG_DEBUG */
+
+/*
+ * Track sum of all purrs across all processors. This is used to further
+ * calculate usage values by different applications
+ */
+static unsigned long get_purr(void)
+{
+ unsigned long sum_purr = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_usage *cu;
+
+ cu = &per_cpu(cpu_usage_array, cpu);
+ sum_purr += cu->current_tb;
+ }
+ return sum_purr;
+}
+
+/*
+ * Methods used to fetch LPAR data when running on a pSeries platform.
+ */
+
+struct hvcall_ppp_data {
+ u64 entitlement;
+ u64 unallocated_entitlement;
+ u16 group_num;
+ u16 pool_num;
+ u8 capped;
+ u8 weight;
+ u8 unallocated_weight;
+ u16 active_procs_in_pool;
+ u16 active_system_procs;
+ u16 phys_platform_procs;
+ u32 max_proc_cap_avail;
+ u32 entitled_proc_cap_avail;
+};
+
+/*
+ * H_GET_PPP hcall returns info in 4 parms
+ * (entitled_capacity, unallocated_capacity,
+ * aggregation, resource_capability).
+ *
+ * R4 = Entitled Processor Capacity Percentage.
+ * R5 = Unallocated Processor Capacity Percentage.
+ * R6 (AABBCCDDEEFFGGHH).
+ * XXXX - reserved (0)
+ * XXXX - reserved (0)
+ * XXXX - Group Number
+ * XXXX - Pool Number.
+ * R7 (IIJJKKLLMMNNOOPP).
+ * XX - reserved. (0)
+ * XX - bit 0-6 reserved (0). bit 7 is Capped indicator.
+ * XX - variable processor Capacity Weight
+ * XX - Unallocated Variable Processor Capacity Weight.
+ * XXXX - Active processors in Physical Processor Pool.
+ * XXXX - Processors active on platform.
+ * R8 (QQQQRRRRRRSSSSSS). if ibm,partition-performance-parameters-level >= 1
+ * XXXX - Physical platform procs allocated to virtualization.
+ * XXXXXX - Max procs capacity % available to the partitions pool.
+ * XXXXXX - Entitled procs capacity % available to the
+ * partitions pool.
+ */
+static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
+{
+ unsigned long rc;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ rc = plpar_hcall9(H_GET_PPP, retbuf);
+
+ ppp_data->entitlement = retbuf[0];
+ ppp_data->unallocated_entitlement = retbuf[1];
+
+ ppp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
+ ppp_data->pool_num = retbuf[2] & 0xffff;
+
+ ppp_data->capped = (retbuf[3] >> 6 * 8) & 0x01;
+ ppp_data->weight = (retbuf[3] >> 5 * 8) & 0xff;
+ ppp_data->unallocated_weight = (retbuf[3] >> 4 * 8) & 0xff;
+ ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
+ ppp_data->active_system_procs = retbuf[3] & 0xffff;
+
+ ppp_data->phys_platform_procs = retbuf[4] >> 6 * 8;
+ ppp_data->max_proc_cap_avail = (retbuf[4] >> 3 * 8) & 0xffffff;
+ ppp_data->entitled_proc_cap_avail = retbuf[4] & 0xffffff;
+
+ return rc;
+}
+
+static unsigned h_pic(unsigned long *pool_idle_time,
+ unsigned long *num_procs)
+{
+ unsigned long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_PIC, retbuf);
+
+ *pool_idle_time = retbuf[0];
+ *num_procs = retbuf[1];
+
+ return rc;
+}
+
+/*
+ * parse_ppp_data
+ * Parse out the data returned from h_get_ppp and h_pic
+ */
+static void parse_ppp_data(struct seq_file *m)
+{
+ struct hvcall_ppp_data ppp_data;
+ struct device_node *root;
+ const int *perf_level;
+ int rc;
+
+ rc = h_get_ppp(&ppp_data);
+ if (rc)
+ return;
+
+ seq_printf(m, "partition_entitled_capacity=%lld\n",
+ ppp_data.entitlement);
+ seq_printf(m, "group=%d\n", ppp_data.group_num);
+ seq_printf(m, "system_active_processors=%d\n",
+ ppp_data.active_system_procs);
+
+ /* pool related entries are appropriate for shared configs */
+ if (lppaca_shared_proc(get_lppaca())) {
+ unsigned long pool_idle_time, pool_procs;
+
+ seq_printf(m, "pool=%d\n", ppp_data.pool_num);
+
+ /* report pool_capacity in percentage */
+ seq_printf(m, "pool_capacity=%d\n",
+ ppp_data.active_procs_in_pool * 100);
+
+ h_pic(&pool_idle_time, &pool_procs);
+ seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
+ seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
+ }
+
+ seq_printf(m, "unallocated_capacity_weight=%d\n",
+ ppp_data.unallocated_weight);
+ seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
+ seq_printf(m, "capped=%d\n", ppp_data.capped);
+ seq_printf(m, "unallocated_capacity=%lld\n",
+ ppp_data.unallocated_entitlement);
+
+ /* The last bits of information returned from h_get_ppp are only
+ * valid if the ibm,partition-performance-parameters-level
+ * property is >= 1.
+ */
+ root = of_find_node_by_path("/");
+ if (root) {
+ perf_level = of_get_property(root,
+ "ibm,partition-performance-parameters-level",
+ NULL);
+ if (perf_level && (*perf_level >= 1)) {
+ seq_printf(m,
+ "physical_procs_allocated_to_virtualization=%d\n",
+ ppp_data.phys_platform_procs);
+ seq_printf(m, "max_proc_capacity_available=%d\n",
+ ppp_data.max_proc_cap_avail);
+ seq_printf(m, "entitled_proc_capacity_available=%d\n",
+ ppp_data.entitled_proc_cap_avail);
+ }
+
+ of_node_put(root);
+ }
+}
+
+/**
+ * parse_mpp_data
+ * Parse out data returned from h_get_mpp
+ */
+static void parse_mpp_data(struct seq_file *m)
+{
+ struct hvcall_mpp_data mpp_data;
+ int rc;
+
+ rc = h_get_mpp(&mpp_data);
+ if (rc)
+ return;
+
+ seq_printf(m, "entitled_memory=%ld\n", mpp_data.entitled_mem);
+
+ if (mpp_data.mapped_mem != -1)
+ seq_printf(m, "mapped_entitled_memory=%ld\n",
+ mpp_data.mapped_mem);
+
+ seq_printf(m, "entitled_memory_group_number=%d\n", mpp_data.group_num);
+ seq_printf(m, "entitled_memory_pool_number=%d\n", mpp_data.pool_num);
+
+ seq_printf(m, "entitled_memory_weight=%d\n", mpp_data.mem_weight);
+ seq_printf(m, "unallocated_entitled_memory_weight=%d\n",
+ mpp_data.unallocated_mem_weight);
+ seq_printf(m, "unallocated_io_mapping_entitlement=%ld\n",
+ mpp_data.unallocated_entitlement);
+
+ if (mpp_data.pool_size != -1)
+ seq_printf(m, "entitled_memory_pool_size=%ld bytes\n",
+ mpp_data.pool_size);
+
+ seq_printf(m, "entitled_memory_loan_request=%ld\n",
+ mpp_data.loan_request);
+
+ seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);
+}
+
+/**
+ * parse_mpp_x_data
+ * Parse out data returned from h_get_mpp_x
+ */
+static void parse_mpp_x_data(struct seq_file *m)
+{
+ struct hvcall_mpp_x_data mpp_x_data;
+
+ if (!firmware_has_feature(FW_FEATURE_XCMO))
+ return;
+ if (h_get_mpp_x(&mpp_x_data))
+ return;
+
+ seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes);
+
+ if (mpp_x_data.pool_coalesced_bytes)
+ seq_printf(m, "pool_coalesced_bytes=%ld\n",
+ mpp_x_data.pool_coalesced_bytes);
+ if (mpp_x_data.pool_purr_cycles)
+ seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles);
+ if (mpp_x_data.pool_spurr_cycles)
+ seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles);
+}
+
+#define SPLPAR_CHARACTERISTICS_TOKEN 20
+#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
+
+/*
+ * parse_system_parameter_string()
+ * Retrieve the potential_processors, max_entitled_capacity and friends
+ * through the get-system-parameter rtas call. Replace keyword strings as
+ * necessary.
+ */
+static void parse_system_parameter_string(struct seq_file *m)
+{
+ int call_status;
+
+ unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
+ if (!local_buffer) {
+ printk(KERN_ERR "%s %s kmalloc failure at line %d\n",
+ __FILE__, __func__, __LINE__);
+ return;
+ }
+
+ spin_lock(&rtas_data_buf_lock);
+ memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH);
+ call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
+ NULL,
+ SPLPAR_CHARACTERISTICS_TOKEN,
+ __pa(rtas_data_buf),
+ RTAS_DATA_BUF_SIZE);
+ memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
+ local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
+ spin_unlock(&rtas_data_buf_lock);
+
+ if (call_status != 0) {
+ printk(KERN_INFO
+ "%s %s Error calling get-system-parameter (0x%x)\n",
+ __FILE__, __func__, call_status);
+ } else {
+ int splpar_strlen;
+ int idx, w_idx;
+ char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
+ if (!workbuffer) {
+ printk(KERN_ERR "%s %s kmalloc failure at line %d\n",
+ __FILE__, __func__, __LINE__);
+ kfree(local_buffer);
+ return;
+ }
+#ifdef LPARCFG_DEBUG
+ printk(KERN_INFO "success calling get-system-parameter\n");
+#endif
+ splpar_strlen = local_buffer[0] * 256 + local_buffer[1];
+ local_buffer += 2; /* step over strlen value */
+
+ w_idx = 0;
+ idx = 0;
+ while ((*local_buffer) && (idx < splpar_strlen)) {
+ workbuffer[w_idx++] = local_buffer[idx++];
+ if ((local_buffer[idx] == ',')
+ || (local_buffer[idx] == '\0')) {
+ workbuffer[w_idx] = '\0';
+ if (w_idx) {
+ /* avoid the empty string */
+ seq_printf(m, "%s\n", workbuffer);
+ }
+ memset(workbuffer, 0, SPLPAR_MAXLENGTH);
+ idx++; /* skip the comma */
+ w_idx = 0;
+ } else if (local_buffer[idx] == '=') {
+ /* code here to replace workbuffer contents
+ with different keyword strings */
+ if (0 == strcmp(workbuffer, "MaxEntCap")) {
+ strcpy(workbuffer,
+ "partition_max_entitled_capacity");
+ w_idx = strlen(workbuffer);
+ }
+ if (0 == strcmp(workbuffer, "MaxPlatProcs")) {
+ strcpy(workbuffer,
+ "system_potential_processors");
+ w_idx = strlen(workbuffer);
+ }
+ }
+ }
+ kfree(workbuffer);
+ local_buffer -= 2; /* back up over strlen value */
+ }
+ kfree(local_buffer);
+}
+
+/* Return the number of processors in the system.
+ * This function reads through the device tree and counts
+ * the virtual processors; this does not include threads.
+ */
+static int lparcfg_count_active_processors(void)
+{
+ struct device_node *cpus_dn = NULL;
+ int count = 0;
+
+ while ((cpus_dn = of_find_node_by_type(cpus_dn, "cpu"))) {
+#ifdef LPARCFG_DEBUG
+ printk(KERN_ERR "cpus_dn %p\n", cpus_dn);
+#endif
+ count++;
+ }
+ return count;
+}
+
+static void pseries_cmo_data(struct seq_file *m)
+{
+ int cpu;
+ unsigned long cmo_faults = 0;
+ unsigned long cmo_fault_time = 0;
+
+ seq_printf(m, "cmo_enabled=%d\n", firmware_has_feature(FW_FEATURE_CMO));
+
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ return;
+
+ for_each_possible_cpu(cpu) {
+ cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);
+ cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);
+ }
+
+ seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
+ seq_printf(m, "cmo_fault_time_usec=%lu\n",
+ cmo_fault_time / tb_ticks_per_usec);
+ seq_printf(m, "cmo_primary_psp=%d\n", cmo_get_primary_psp());
+ seq_printf(m, "cmo_secondary_psp=%d\n", cmo_get_secondary_psp());
+ seq_printf(m, "cmo_page_size=%lu\n", cmo_get_page_size());
+}
+
+static void splpar_dispatch_data(struct seq_file *m)
+{
+ int cpu;
+ unsigned long dispatches = 0;
+ unsigned long dispatch_dispersions = 0;
+
+ for_each_possible_cpu(cpu) {
+ dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);
+ dispatch_dispersions +=
+ be32_to_cpu(lppaca_of(cpu).dispersion_count);
+ }
+
+ seq_printf(m, "dispatches=%lu\n", dispatches);
+ seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
+}
+
+static void parse_em_data(struct seq_file *m)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+ seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
+}
+
+static int pseries_lparcfg_data(struct seq_file *m, void *v)
+{
+ int partition_potential_processors;
+ int partition_active_processors;
+ struct device_node *rtas_node;
+ const int *lrdrp = NULL;
+
+ rtas_node = of_find_node_by_path("/rtas");
+ if (rtas_node)
+ lrdrp = of_get_property(rtas_node, "ibm,lrdr-capacity", NULL);
+
+ if (lrdrp == NULL) {
+ partition_potential_processors = vdso_data->processorCount;
+ } else {
+ partition_potential_processors = *(lrdrp + 4);
+ }
+ of_node_put(rtas_node);
+
+ partition_active_processors = lparcfg_count_active_processors();
+
+ if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ /* this call handles the ibm,get-system-parameter contents */
+ parse_system_parameter_string(m);
+ parse_ppp_data(m);
+ parse_mpp_data(m);
+ parse_mpp_x_data(m);
+ pseries_cmo_data(m);
+ splpar_dispatch_data(m);
+
+ seq_printf(m, "purr=%ld\n", get_purr());
+ } else { /* non SPLPAR case */
+
+ seq_printf(m, "system_active_processors=%d\n",
+ partition_potential_processors);
+
+ seq_printf(m, "system_potential_processors=%d\n",
+ partition_potential_processors);
+
+ seq_printf(m, "partition_max_entitled_capacity=%d\n",
+ partition_potential_processors * 100);
+
+ seq_printf(m, "partition_entitled_capacity=%d\n",
+ partition_active_processors * 100);
+ }
+
+ seq_printf(m, "partition_active_processors=%d\n",
+ partition_active_processors);
+
+ seq_printf(m, "partition_potential_processors=%d\n",
+ partition_potential_processors);
+
+ seq_printf(m, "shared_processor_mode=%d\n",
+ lppaca_shared_proc(get_lppaca()));
+
+ seq_printf(m, "slb_size=%d\n", mmu_slb_size);
+
+ parse_em_data(m);
+
+ return 0;
+}
+
+static ssize_t update_ppp(u64 *entitlement, u8 *weight)
+{
+ struct hvcall_ppp_data ppp_data;
+ u8 new_weight;
+ u64 new_entitled;
+ ssize_t retval;
+
+ /* Get our current parameters */
+ retval = h_get_ppp(&ppp_data);
+ if (retval)
+ return retval;
+
+ if (entitlement) {
+ new_weight = ppp_data.weight;
+ new_entitled = *entitlement;
+ } else if (weight) {
+ new_weight = *weight;
+ new_entitled = ppp_data.entitlement;
+ } else
+ return -EINVAL;
+
+ pr_debug("%s: current_entitled = %llu, current_weight = %u\n",
+ __func__, ppp_data.entitlement, ppp_data.weight);
+
+ pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
+ __func__, new_entitled, new_weight);
+
+ retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
+ return retval;
+}
+
+/**
+ * update_mpp
+ *
+ * Update the memory entitlement and weight for the partition. Caller must
+ * specify either a new entitlement or weight, not both, to be updated
+ * since the h_set_mpp call takes both entitlement and weight as parameters.
+ */
+static ssize_t update_mpp(u64 *entitlement, u8 *weight)
+{
+ struct hvcall_mpp_data mpp_data;
+ u64 new_entitled;
+ u8 new_weight;
+ ssize_t rc;
+
+ if (entitlement) {
+ /* Check with vio to ensure the new memory entitlement
+ * can be handled.
+ */
+ rc = vio_cmo_entitlement_update(*entitlement);
+ if (rc)
+ return rc;
+ }
+
+ rc = h_get_mpp(&mpp_data);
+ if (rc)
+ return rc;
+
+ if (entitlement) {
+ new_weight = mpp_data.mem_weight;
+ new_entitled = *entitlement;
+ } else if (weight) {
+ new_weight = *weight;
+ new_entitled = mpp_data.entitled_mem;
+ } else
+ return -EINVAL;
+
+ pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
+ __func__, mpp_data.entitled_mem, mpp_data.mem_weight);
+
+ pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
+ __func__, new_entitled, new_weight);
+
+ rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
+ return rc;
+}
+
+/*
+ * Interface for changing system parameters (variable capacity weight
+ * and entitled capacity). Format of input is "param_name=value";
+ * anything after value is ignored. Valid parameters at this time are
+ * "partition_entitled_capacity" and "capacity_weight", altered via
+ * H_SET_PPP, and "entitled_memory" and "entitled_memory_weight",
+ * altered via H_SET_MPP.
+ *
+ * This function should be invoked only on systems with
+ * FW_FEATURE_SPLPAR.
+ */
+static ssize_t lparcfg_write(struct file *file, const char __user * buf,
+ size_t count, loff_t * off)
+{
+ int kbuf_sz = 64;
+ char kbuf[kbuf_sz];
+ char *tmp;
+ u64 new_entitled, *new_entitled_ptr = &new_entitled;
+ u8 new_weight, *new_weight_ptr = &new_weight;
+ ssize_t retval;
+
+ if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+ return -EINVAL;
+
+ if (count > kbuf_sz)
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count - 1] = '\0';
+ tmp = strchr(kbuf, '=');
+ if (!tmp)
+ return -EINVAL;
+
+ *tmp++ = '\0';
+
+ if (!strcmp(kbuf, "partition_entitled_capacity")) {
+ char *endp;
+ *new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
+ if (endp == tmp)
+ return -EINVAL;
+
+ retval = update_ppp(new_entitled_ptr, NULL);
+ } else if (!strcmp(kbuf, "capacity_weight")) {
+ char *endp;
+ *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
+ if (endp == tmp)
+ return -EINVAL;
+
+ retval = update_ppp(NULL, new_weight_ptr);
+ } else if (!strcmp(kbuf, "entitled_memory")) {
+ char *endp;
+ *new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
+ if (endp == tmp)
+ return -EINVAL;
+
+ retval = update_mpp(new_entitled_ptr, NULL);
+ } else if (!strcmp(kbuf, "entitled_memory_weight")) {
+ char *endp;
+ *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
+ if (endp == tmp)
+ return -EINVAL;
+
+ retval = update_mpp(NULL, new_weight_ptr);
+ } else
+ return -EINVAL;
+
+ if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
+ retval = count;
+ } else if (retval == H_BUSY) {
+ retval = -EBUSY;
+ } else if (retval == H_HARDWARE) {
+ retval = -EIO;
+ } else if (retval == H_PARAMETER) {
+ retval = -EINVAL;
+ }
+
+ return retval;
+}
+
+static int lparcfg_data(struct seq_file *m, void *v)
+{
+ struct device_node *rootdn;
+ const char *model = "";
+ const char *system_id = "";
+ const char *tmp;
+ const unsigned int *lp_index_ptr;
+ unsigned int lp_index = 0;
+
+ seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
+
+ rootdn = of_find_node_by_path("/");
+ if (rootdn) {
+ tmp = of_get_property(rootdn, "model", NULL);
+ if (tmp)
+ model = tmp;
+ tmp = of_get_property(rootdn, "system-id", NULL);
+ if (tmp)
+ system_id = tmp;
+ lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
+ NULL);
+ if (lp_index_ptr)
+ lp_index = *lp_index_ptr;
+ of_node_put(rootdn);
+ }
+ seq_printf(m, "serial_number=%s\n", system_id);
+ seq_printf(m, "system_type=%s\n", model);
+ seq_printf(m, "partition_id=%d\n", (int)lp_index);
+
+ return pseries_lparcfg_data(m, v);
+}
+
+static int lparcfg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lparcfg_data, NULL);
+}
+
+static const struct file_operations lparcfg_fops = {
+ .read = seq_read,
+ .write = lparcfg_write,
+ .open = lparcfg_open,
+ .release = single_release,
+ .llseek = seq_lseek,
+};
+
+static int __init lparcfg_init(void)
+{
+ umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
+
+ /* Allow writing if we have FW_FEATURE_SPLPAR */
+ if (firmware_has_feature(FW_FEATURE_SPLPAR))
+ mode |= S_IWUSR;
+
+ if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops)) {
+ printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
+ return -EIO;
+ }
+ return 0;
+}
+machine_device_initcall(pseries, lparcfg_init);
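
[Editorial note, not part of the patch: a small userspace sketch of the
interface the new file provides. The path comes from the proc_create() call
above; the value 128 and the buffer size are arbitrary illustration choices.]

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/powerpc/lparcfg", "r");

		if (!f)
			return 1;

		/* Reads return the keyword=value pairs emitted by
		 * lparcfg_data()/pseries_lparcfg_data().
		 */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);

		/* On SPLPAR firmware the file is also writable (root only);
		 * lparcfg_write() parses "param_name=value" and this asks
		 * for a new variable capacity weight via H_SET_PPP.
		 */
		f = fopen("/proc/powerpc/lparcfg", "w");
		if (!f)
			return 1;
		fputs("capacity_weight=128\n", f);
		fclose(f);

		return 0;
	}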
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 3d01eee9ffb1..cde4e0a095ae 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -28,7 +28,7 @@ struct update_props_workarea {
u32 state;
u64 reserved;
u32 nprops;
-};
+} __packed;
#define NODE_ACTION_MASK 0xff000000
#define NODE_COUNT_MASK 0x00ffffff
@@ -62,6 +62,7 @@ static int delete_dt_node(u32 phandle)
return -ENOENT;
dlpar_detach_node(dn);
+ of_node_put(dn);
return 0;
}
@@ -119,7 +120,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
if (!more) {
of_update_property(dn, new_prop);
- new_prop = NULL;
+ *prop = NULL;
}
return 0;
@@ -130,7 +131,7 @@ static int update_dt_node(u32 phandle, s32 scope)
struct update_props_workarea *upwa;
struct device_node *dn;
struct property *prop = NULL;
- int i, rc;
+ int i, rc, rtas_rc;
char *prop_data;
char *rtas_buf;
int update_properties_token;
@@ -154,25 +155,26 @@ static int update_dt_node(u32 phandle, s32 scope)
upwa->phandle = phandle;
do {
- rc = mobility_rtas_call(update_properties_token, rtas_buf,
+ rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf,
scope);
- if (rc < 0)
+ if (rtas_rc < 0)
break;
prop_data = rtas_buf + sizeof(*upwa);
- /* The first element of the buffer is the path of the node
- * being updated in the form of a 8 byte string length
- * followed by the string. Skip past this to get to the
- * properties being updated.
+ /* On the first call to ibm,update-properties for a node, the
+ * first property value descriptor contains an empty
+ * property name, the property value length encoded as a u32,
+ * and the property value is the node path being updated.
+ */
- vd = *prop_data++;
- prop_data += vd;
+ if (*prop_data == 0) {
+ prop_data++;
+ vd = *(u32 *)prop_data;
+ prop_data += vd + sizeof(vd);
+ upwa->nprops--;
+ }
- /* The path we skipped over is counted as one of the elements
- * returned so start counting at one.
- */
- for (i = 1; i < upwa->nprops; i++) {
+ for (i = 0; i < upwa->nprops; i++) {
char *prop_name;
prop_name = prop_data;
@@ -202,7 +204,7 @@ static int update_dt_node(u32 phandle, s32 scope)
prop_data += vd;
}
}
- } while (rc == 1);
+ } while (rtas_rc == 1);
of_node_put(dn);
kfree(rtas_buf);
@@ -215,17 +217,14 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
struct device_node *parent_dn;
int rc;
- dn = dlpar_configure_connector(drc_index);
- if (!dn)
+ parent_dn = of_find_node_by_phandle(parent_phandle);
+ if (!parent_dn)
return -ENOENT;
- parent_dn = of_find_node_by_phandle(parent_phandle);
- if (!parent_dn) {
- dlpar_free_cc_nodes(dn);
+ dn = dlpar_configure_connector(drc_index, parent_dn);
+ if (!dn)
return -ENOENT;
- }
- dn->parent = parent_dn;
rc = dlpar_attach_node(dn);
if (rc)
dlpar_free_cc_nodes(dn);
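
[Editorial note, not part of the patch: the ibm,update-properties work-area
layout that update_dt_node() now walks, pieced together from this hunk alone
(the phandle member is implied by the upwa->phandle assignment above).]

	/*
	 * rtas_buf as consumed by update_dt_node():
	 *
	 *	struct update_props_workarea {	header, now __packed
	 *		u32 phandle;
	 *		u32 state;
	 *		u64 reserved;
	 *		u32 nprops;
	 *	};
	 *
	 * followed by nprops property descriptors, each of the form
	 *
	 *	char name[];	NUL-terminated property name
	 *	u32  vd;	length of the value that follows
	 *	u8   value[vd];
	 *
	 * On the first ibm,update-properties call for a node the first
	 * descriptor has an empty name and its value is the path of the
	 * node being updated, so the loop skips it (one byte of name, vd,
	 * then vd value bytes) and decrements upwa->nprops, instead of
	 * starting the property count at one as the old code did.
	 */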
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
deleted file mode 100644
index f35787b6a5e0..000000000000
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ /dev/null
@@ -1,324 +0,0 @@
-#ifndef _PSERIES_PLPAR_WRAPPERS_H
-#define _PSERIES_PLPAR_WRAPPERS_H
-
-#include <linux/string.h>
-#include <linux/irqflags.h>
-
-#include <asm/hvcall.h>
-#include <asm/paca.h>
-#include <asm/page.h>
-
-/* Get state of physical CPU from query_cpu_stopped */
-int smp_query_cpu_stopped(unsigned int pcpu);
-#define QCSS_STOPPED 0
-#define QCSS_STOPPING 1
-#define QCSS_NOT_STOPPED 2
-#define QCSS_HARDWARE_ERROR -1
-#define QCSS_HARDWARE_BUSY -2
-
-static inline long poll_pending(void)
-{
- return plpar_hcall_norets(H_POLL_PENDING);
-}
-
-static inline u8 get_cede_latency_hint(void)
-{
- return get_lppaca()->cede_latency_hint;
-}
-
-static inline void set_cede_latency_hint(u8 latency_hint)
-{
- get_lppaca()->cede_latency_hint = latency_hint;
-}
-
-static inline long cede_processor(void)
-{
- return plpar_hcall_norets(H_CEDE);
-}
-
-static inline long extended_cede_processor(unsigned long latency_hint)
-{
- long rc;
- u8 old_latency_hint = get_cede_latency_hint();
-
- set_cede_latency_hint(latency_hint);
-
- rc = cede_processor();
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* Ensure that H_CEDE returns with IRQs on */
- if (WARN_ON(!(mfmsr() & MSR_EE)))
- __hard_irq_enable();
-#endif
-
- set_cede_latency_hint(old_latency_hint);
-
- return rc;
-}
-
-static inline long vpa_call(unsigned long flags, unsigned long cpu,
- unsigned long vpa)
-{
- flags = flags << H_VPA_FUNC_SHIFT;
-
- return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
-}
-
-static inline long unregister_vpa(unsigned long cpu)
-{
- return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
-}
-
-static inline long register_vpa(unsigned long cpu, unsigned long vpa)
-{
- return vpa_call(H_VPA_REG_VPA, cpu, vpa);
-}
-
-static inline long unregister_slb_shadow(unsigned long cpu)
-{
- return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
-}
-
-static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
-{
- return vpa_call(H_VPA_REG_SLB, cpu, vpa);
-}
-
-static inline long unregister_dtl(unsigned long cpu)
-{
- return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
-}
-
-static inline long register_dtl(unsigned long cpu, unsigned long vpa)
-{
- return vpa_call(H_VPA_REG_DTL, cpu, vpa);
-}
-
-static inline long plpar_page_set_loaned(unsigned long vpa)
-{
- unsigned long cmo_page_sz = cmo_get_page_size();
- long rc = 0;
- int i;
-
- for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
- rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
-
- for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
- plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
- vpa + i - cmo_page_sz, 0);
-
- return rc;
-}
-
-static inline long plpar_page_set_active(unsigned long vpa)
-{
- unsigned long cmo_page_sz = cmo_get_page_size();
- long rc = 0;
- int i;
-
- for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
- rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
-
- for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
- plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
- vpa + i - cmo_page_sz, 0);
-
- return rc;
-}
-
-extern void vpa_init(int cpu);
-
-static inline long plpar_pte_enter(unsigned long flags,
- unsigned long hpte_group, unsigned long hpte_v,
- unsigned long hpte_r, unsigned long *slot)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
-
- *slot = retbuf[0];
-
- return rc;
-}
-
-static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
- unsigned long avpn, unsigned long *old_pteh_ret,
- unsigned long *old_ptel_ret)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
-
- *old_pteh_ret = retbuf[0];
- *old_ptel_ret = retbuf[1];
-
- return rc;
-}
-
-/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
-static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
- unsigned long avpn, unsigned long *old_pteh_ret,
- unsigned long *old_ptel_ret)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
-
- *old_pteh_ret = retbuf[0];
- *old_ptel_ret = retbuf[1];
-
- return rc;
-}
-
-static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
- unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall(H_READ, retbuf, flags, ptex);
-
- *old_pteh_ret = retbuf[0];
- *old_ptel_ret = retbuf[1];
-
- return rc;
-}
-
-/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
-static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
- unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
-
- *old_pteh_ret = retbuf[0];
- *old_ptel_ret = retbuf[1];
-
- return rc;
-}
-
-/*
- * plpar_pte_read_4_raw can be called in real mode.
- * ptes must be 8*sizeof(unsigned long)
- */
-static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
- unsigned long *ptes)
-
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
-
- rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
-
- memcpy(ptes, retbuf, 8*sizeof(unsigned long));
-
- return rc;
-}
-
-static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
- unsigned long avpn)
-{
- return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
-}
-
-static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
- unsigned long *tce_ret)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
-
- *tce_ret = retbuf[0];
-
- return rc;
-}
-
-static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
- unsigned long tceval)
-{
- return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
-}
-
-static inline long plpar_tce_put_indirect(unsigned long liobn,
- unsigned long ioba, unsigned long page, unsigned long count)
-{
- return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
-}
-
-static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
- unsigned long tceval, unsigned long count)
-{
- return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
-}
-
-static inline long plpar_get_term_char(unsigned long termno,
- unsigned long *len_ret, char *buf_ret)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- unsigned long *lbuf = (unsigned long *)buf_ret; /* TODO: alignment? */
-
- rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, termno);
-
- *len_ret = retbuf[0];
- lbuf[0] = retbuf[1];
- lbuf[1] = retbuf[2];
-
- return rc;
-}
-
-static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
- const char *buffer)
-{
- unsigned long *lbuf = (unsigned long *)buffer; /* TODO: alignment? */
- return plpar_hcall_norets(H_PUT_TERM_CHAR, termno, len, lbuf[0],
- lbuf[1]);
-}
-
-/* Set various resource mode parameters */
-static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
- unsigned long value1, unsigned long value2)
-{
- return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
-}
-
-/*
- * Enable relocation on exceptions on this partition
- *
- * Note: this call has a partition wide scope and can take a while to complete.
- * If it returns H_LONG_BUSY_* it should be retried periodically until it
- * returns H_SUCCESS.
- */
-static inline long enable_reloc_on_exceptions(void)
-{
- /* mflags = 3: Exceptions at 0xC000000000004000 */
- return plpar_set_mode(3, 3, 0, 0);
-}
-
-/*
- * Disable relocation on exceptions on this partition
- *
- * Note: this call has a partition wide scope and can take a while to complete.
- * If it returns H_LONG_BUSY_* it should be retried periodically until it
- * returns H_SUCCESS.
- */
-static inline long disable_reloc_on_exceptions(void) {
- return plpar_set_mode(0, 3, 0, 0);
-}
-
-static inline long plapr_set_ciabr(unsigned long ciabr)
-{
- return plpar_set_mode(0, 1, ciabr, 0);
-}
-
-static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
-{
- return plpar_set_mode(0, 2, dawr0, dawrx0);
-}
-
-#endif /* _PSERIES_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 4644efa06941..a166e38bd683 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -18,9 +18,7 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>
-
-#include "plpar_wrappers.h"
-#include "pseries.h"
+#include <asm/plpar_wrappers.h>
struct cpuidle_driver pseries_idle_driver = {
.name = "pseries_idle",
@@ -45,7 +43,11 @@ static inline void idle_loop_prolog(unsigned long *in_purr)
static inline void idle_loop_epilog(unsigned long in_purr)
{
- get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
+ u64 wait_cycles;
+
+ wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
+ wait_cycles += mfspr(SPRN_PURR) - in_purr;
+ get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
get_lppaca()->idle = 0;
}
@@ -308,7 +310,7 @@ static int pseries_idle_probe(void)
return -EPERM;
}
- if (get_lppaca()->shared_proc)
+ if (lppaca_shared_proc(get_lppaca()))
cpuidle_state_table = shared_states;
else
cpuidle_state_table = dedicated_states;
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index c2a3a258001c..99219530ea4a 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -56,13 +56,10 @@ extern void hvc_vio_init_early(void);
/* Dynamic logical Partitioning/Mobility */
extern void dlpar_free_cc_nodes(struct device_node *);
extern void dlpar_free_cc_property(struct property *);
-extern struct device_node *dlpar_configure_connector(u32);
+extern struct device_node *dlpar_configure_connector(u32, struct device_node *);
extern int dlpar_attach_node(struct device_node *);
extern int dlpar_detach_node(struct device_node *);
-/* Snooze Delay, pseries_idle */
-DECLARE_PER_CPU(long, smt_snooze_delay);
-
/* PCI root bridge prepare function override for pseries */
struct pci_host_bridge;
int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index a91e6dadda2c..92767791f93b 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -108,8 +108,8 @@ err:
* energy consumption.
*/
-#define FLAGS_MODE1 0x004E200000080E01
-#define FLAGS_MODE2 0x004E200000080401
+#define FLAGS_MODE1 0x004E200000080E01UL
+#define FLAGS_MODE2 0x004E200000080401UL
#define FLAGS_ACTIVATE 0x100
static ssize_t get_best_energy_list(char *page, int activate)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c11c8238797c..d64feb3ea0be 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -66,8 +66,8 @@
#include <asm/firmware.h>
#include <asm/eeh.h>
#include <asm/reg.h>
+#include <asm/plpar_wrappers.h>
-#include "plpar_wrappers.h"
#include "pseries.h"
int CMO_PrPSP = -1;
@@ -183,7 +183,7 @@ static void __init pseries_mpic_init_IRQ(void)
np = of_find_node_by_path("/");
naddr = of_n_addr_cells(np);
opprop = of_get_property(np, "platform-open-pic", &opplen);
- if (opprop != 0) {
+ if (opprop != NULL) {
openpic_addr = of_read_number(opprop, naddr);
printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
}
@@ -323,7 +323,7 @@ static int alloc_dispatch_logs(void)
get_paca()->lppaca_ptr->dtl_idx = 0;
/* hypervisor reads buffer length from this field */
- dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
+ dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
if (ret)
pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 306643cc9dbc..1c1771a40250 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -43,8 +43,8 @@
#include <asm/cputhreads.h>
#include <asm/xics.h>
#include <asm/dbell.h>
+#include <asm/plpar_wrappers.h>
-#include "plpar_wrappers.h"
#include "pseries.h"
#include "offline_states.h"
@@ -187,22 +187,6 @@ static int smp_pSeries_kick_cpu(int nr)
return 0;
}
-static int smp_pSeries_cpu_bootable(unsigned int nr)
-{
- /* Special case - we inhibit secondary thread startup
- * during boot if the user requests it.
- */
- if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
- if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
- return 0;
- if (smt_enabled_at_boot
- && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
- return 0;
- }
-
- return 1;
-}
-
/* Only used on systems that support multiple IPI mechanisms */
static void pSeries_cause_ipi_mux(int cpu, unsigned long data)
{
@@ -237,7 +221,7 @@ static struct smp_ops_t pSeries_xics_smp_ops = {
.probe = pSeries_smp_probe,
.kick_cpu = smp_pSeries_kick_cpu,
.setup_cpu = smp_xics_setup_cpu,
- .cpu_bootable = smp_pSeries_cpu_bootable,
+ .cpu_bootable = smp_generic_cpu_bootable,
};
/* This is called very early */