path: root/drivers/acpi/cppc_acpi.c
Diffstat (limited to 'drivers/acpi/cppc_acpi.c')
-rw-r--r-- drivers/acpi/cppc_acpi.c | 237
1 file changed, 191 insertions(+), 46 deletions(-)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6730f965b379..8adac69dba3d 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -39,6 +39,7 @@
#include <linux/cpufreq.h>
#include <linux/delay.h>
+#include <linux/ktime.h>
#include <acpi/cppc_acpi.h>
/*
@@ -63,58 +64,140 @@ static struct mbox_chan *pcc_channel;
static void __iomem *pcc_comm_addr;
static u64 comm_base_addr;
static int pcc_subspace_idx = -1;
-static u16 pcc_cmd_delay;
static bool pcc_channel_acquired;
+static ktime_t deadline;
+static unsigned int pcc_mpar, pcc_mrtt;
+
+/* pcc mapped address + header size + offset within PCC subspace */
+#define GET_PCC_VADDR(offs) (pcc_comm_addr + 0x8 + (offs))
/*
* Arbitrary Retries in case the remote processor is slow to respond
- * to PCC commands.
+ * to PCC commands. Keep it high enough to cover emulators, where
+ * the processors run painfully slowly.
*/
#define NUM_RETRIES 500
+static int check_pcc_chan(void)
+{
+ int ret = -EIO;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr;
+ ktime_t next_deadline = ktime_add(ktime_get(), deadline);
+
+ /* Retry in case the remote processor was too slow to catch up. */
+ while (!ktime_after(ktime_get(), next_deadline)) {
+ /*
+ * Per spec, prior to boot the PCC space will be initialized by
+ * the platform and should have set the command completion bit when
+ * PCC can be used by OSPM.
+ */
+ if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
+ ret = 0;
+ break;
+ }
+ /*
+ * Reduce bus traffic in case this loop takes more than a
+ * few retries.
+ */
+ udelay(3);
+ }
+
+ return ret;
+}
+
static int send_pcc_cmd(u16 cmd)
{
- int retries, result = -EIO;
- struct acpi_pcct_hw_reduced *pcct_ss = pcc_channel->con_priv;
+ int ret = -EIO;
struct acpi_pcct_shared_memory *generic_comm_base =
(struct acpi_pcct_shared_memory *) pcc_comm_addr;
- u32 cmd_latency = pcct_ss->latency;
+ static ktime_t last_cmd_cmpl_time, last_mpar_reset;
+ static int mpar_count;
+ unsigned int time_delta;
- /* Min time OS should wait before sending next command. */
- udelay(pcc_cmd_delay);
+ /*
+ * For CMD_WRITE we know for a fact that the caller has already
+ * checked the channel before writing to PCC space.
+ */
+ if (cmd == CMD_READ) {
+ ret = check_pcc_chan();
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Handle the Minimum Request Turnaround Time (MRTT):
+ * "The minimum amount of time that OSPM must wait after the completion
+ * of a command before issuing the next command, in microseconds"
+ */
+ if (pcc_mrtt) {
+ time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
+ if (pcc_mrtt > time_delta)
+ udelay(pcc_mrtt - time_delta);
+ }
+
+ /*
+ * Handle a non-zero Maximum Periodic Access Rate (MPAR):
+ * "The maximum number of periodic requests that the subspace channel can
+ * support, reported in commands per minute. 0 indicates no limitation."
+ *
+ * This parameter should ideally be zero or large enough that it can
+ * handle the maximum number of requests that all the cores in the system
+ * can collectively generate. If it is not, we will follow the spec and
+ * simply not send the request to the platform after hitting the MPAR
+ * limit in any 60s window.
+ */
+ if (pcc_mpar) {
+ if (mpar_count == 0) {
+ time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
+ if (time_delta < 60 * MSEC_PER_SEC) {
+ pr_debug("PCC cmd not sent due to MPAR limit");
+ return -EIO;
+ }
+ last_mpar_reset = ktime_get();
+ mpar_count = pcc_mpar;
+ }
+ mpar_count--;
+ }
/* Write to the shared comm region. */
- writew(cmd, &generic_comm_base->command);
+ writew_relaxed(cmd, &generic_comm_base->command);
/* Flip CMD COMPLETE bit */
- writew(0, &generic_comm_base->status);
+ writew_relaxed(0, &generic_comm_base->status);
/* Ring doorbell */
- result = mbox_send_message(pcc_channel, &cmd);
- if (result < 0) {
+ ret = mbox_send_message(pcc_channel, &cmd);
+ if (ret < 0) {
pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
- cmd, result);
- return result;
+ cmd, ret);
+ return ret;
}
- /* Wait for a nominal time to let platform process command. */
- udelay(cmd_latency);
-
- /* Retry in case the remote processor was too slow to catch up. */
- for (retries = NUM_RETRIES; retries > 0; retries--) {
- if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
- result = 0;
- break;
- }
+ /*
+ * For READs we need to ensure the cmd completed so that
+ * the ensuing read()s can proceed. For WRITEs we don't care,
+ * because the actual write()s are done before coming here
+ * and the next READ or WRITE will check whether the channel
+ * is busy/free on entry to this call.
+ *
+ * If the Minimum Request Turnaround Time is non-zero, we need
+ * to record the completion time of both READ and WRITE
+ * commands for proper handling of MRTT, so we need to check
+ * for pcc_mrtt in addition to CMD_READ.
+ */
+ if (cmd == CMD_READ || pcc_mrtt) {
+ ret = check_pcc_chan();
+ if (pcc_mrtt)
+ last_cmd_cmpl_time = ktime_get();
}
- mbox_client_txdone(pcc_channel, result);
- return result;
+ mbox_client_txdone(pcc_channel, ret);
+ return ret;
}
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
- if (ret)
+ if (ret < 0)
pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
*(u16 *)msg, ret);
else
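
The MPAR handling above amounts to a fixed 60-second-window rate limiter. A minimal standalone sketch of that accounting, where the hypothetical now_ms() and ring_doorbell() helpers stand in for ktime_get() and the mailbox doorbell path:

	/*
	 * Sketch of the MPAR bookkeeping in send_pcc_cmd(); pcc_mpar mirrors
	 * cppc_ss->max_access_rate (commands per minute, 0 = no limit).
	 */
	static unsigned int pcc_mpar;
	static u64 last_mpar_reset;	/* start of current 60s window, in ms */
	static int mpar_count;		/* commands left in current window */

	static int mpar_try_send(u16 cmd)
	{
		if (pcc_mpar) {
			if (mpar_count == 0) {
				/* Window exhausted: refuse until 60s have elapsed. */
				if (now_ms() - last_mpar_reset < 60 * 1000)
					return -EIO;
				last_mpar_reset = now_ms();
				mpar_count = pcc_mpar;
			}
			mpar_count--;
		}
		return ring_doorbell(cmd);
	}
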
@@ -306,6 +389,7 @@ static int register_pcc_channel(int pcc_subspace_idx)
{
struct acpi_pcct_hw_reduced *cppc_ss;
unsigned int len;
+ u64 usecs_lat;
if (pcc_subspace_idx >= 0) {
pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
@@ -335,7 +419,16 @@ static int register_pcc_channel(int pcc_subspace_idx)
*/
comm_base_addr = cppc_ss->base_address;
len = cppc_ss->length;
- pcc_cmd_delay = cppc_ss->min_turnaround_time;
+
+ /*
+ * cppc_ss->latency is just a Nominal value. In reality
+ * the remote processor could be much slower to reply.
+ * So add an arbitrary amount of wait on top of Nominal.
+ */
+ usecs_lat = NUM_RETRIES * cppc_ss->latency;
+ deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
+ pcc_mrtt = cppc_ss->min_turnaround_time;
+ pcc_mpar = cppc_ss->max_access_rate;
pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
if (!pcc_comm_addr) {
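
To make the deadline arithmetic concrete: if the PCCT reported a Nominal Latency of, say, 40 us (an illustrative figure, not from this patch), the polling budget would be NUM_RETRIES * 40 us = 500 * 40 us = 20 ms, so check_pcc_chan() gives up after roughly 20 ms of polling in 3 us steps.
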
@@ -546,29 +639,74 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
-static u64 get_phys_addr(struct cpc_reg *reg)
-{
- /* PCC communication addr space begins at byte offset 0x8. */
- if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
- return (u64)comm_base_addr + 0x8 + reg->address;
- else
- return reg->address;
-}
+/*
+ * Since cpc_read and cpc_write are called while holding pcc_lock, they should
+ * be as fast as possible. We have already mapped the PCC subspace during init,
+ * so we can read from and write to it directly.
+ */
-static void cpc_read(struct cpc_reg *reg, u64 *val)
+static int cpc_read(struct cpc_reg *reg, u64 *val)
{
- u64 addr = get_phys_addr(reg);
+ int ret_val = 0;
- acpi_os_read_memory((acpi_physical_address)addr,
- val, reg->bit_width);
+ *val = 0;
+ if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ void __iomem *vaddr = GET_PCC_VADDR(reg->address);
+
+ switch (reg->bit_width) {
+ case 8:
+ *val = readb_relaxed(vaddr);
+ break;
+ case 16:
+ *val = readw_relaxed(vaddr);
+ break;
+ case 32:
+ *val = readl_relaxed(vaddr);
+ break;
+ case 64:
+ *val = readq_relaxed(vaddr);
+ break;
+ default:
+ pr_debug("Error: Cannot read %u bit width from PCC\n",
+ reg->bit_width);
+ ret_val = -EFAULT;
+ }
+ } else
+ ret_val = acpi_os_read_memory((acpi_physical_address)reg->address,
+ val, reg->bit_width);
+ return ret_val;
}
-static void cpc_write(struct cpc_reg *reg, u64 val)
+static int cpc_write(struct cpc_reg *reg, u64 val)
{
- u64 addr = get_phys_addr(reg);
+ int ret_val = 0;
- acpi_os_write_memory((acpi_physical_address)addr,
- val, reg->bit_width);
+ if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ void __iomem *vaddr = GET_PCC_VADDR(reg->address);
+
+ switch (reg->bit_width) {
+ case 8:
+ writeb_relaxed(val, vaddr);
+ break;
+ case 16:
+ writew_relaxed(val, vaddr);
+ break;
+ case 32:
+ writel_relaxed(val, vaddr);
+ break;
+ case 64:
+ writeq_relaxed(val, vaddr);
+ break;
+ default:
+ pr_debug("Error: Cannot write %u bit width to PCC\n",
+ reg->bit_width);
+ ret_val = -EFAULT;
+ break;
+ }
+ } else
+ ret_val = acpi_os_write_memory((acpi_physical_address)reg->address,
+ val, reg->bit_width);
+ return ret_val;
}
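
The 0x8 header offset baked into GET_PCC_VADDR() skips the generic header that every PCC shared-memory region begins with: a 4-byte signature followed by 2-byte command and status fields, as described by ACPICA's struct acpi_pcct_shared_memory:

	struct acpi_pcct_shared_memory {
		u32 signature;	/* subspace signature */
		u16 command;	/* command field, written by OSPM */
		u16 status;	/* status field; bit 0 is PCC_CMD_COMPLETE */
	};

Register offsets in the _CPC package are relative to the first byte after this header, which is why cpc_read()/cpc_write() add 0x8 instead of treating reg->address as an absolute physical address.
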
/**
@@ -604,7 +742,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
(ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
(nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
/* Ring doorbell once to update PCC subspace */
- if (send_pcc_cmd(CMD_READ)) {
+ if (send_pcc_cmd(CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
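
Note the switch from if (send_pcc_cmd(CMD_READ)) to an explicit < 0 test here and in the hunks below: send_pcc_cmd() now propagates the return value of mbox_send_message(), which can be a non-negative token on success, so only negative values mean failure.
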
@@ -662,7 +800,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
(reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
/* Ring doorbell once to update PCC subspace */
- if (send_pcc_cmd(CMD_READ)) {
+ if (send_pcc_cmd(CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
@@ -713,6 +851,13 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
spin_lock(&pcc_lock);
+ /* If this is a PCC reg, check whether the channel is free before writing */
+ if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ ret = check_pcc_chan();
+ if (ret)
+ goto busy_channel;
+ }
+
/*
* Skip writing MIN/MAX until Linux knows how to come up with
* useful values.
@@ -722,10 +867,10 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
/* Is this a PCC reg? */
if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
/* Ring doorbell so Remote can get our perf request. */
- if (send_pcc_cmd(CMD_WRITE))
+ if (send_pcc_cmd(CMD_WRITE) < 0)
ret = -EIO;
}
-
+busy_channel:
spin_unlock(&pcc_lock);
return ret;
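
For context, a minimal caller sketch of the exported API this file implements. This is a hypothetical helper with error handling trimmed; in-tree, the cppc_cpufreq driver is the real consumer:

	#include <acpi/cppc_acpi.h>

	/*
	 * Hypothetical example: read a CPU's performance capabilities once,
	 * then request its highest performance level. Only desired_perf
	 * matters for now, since cppc_set_perf() skips the MIN/MAX writes.
	 */
	static int set_max_perf(int cpu)
	{
		struct cppc_perf_caps caps;
		struct cppc_perf_ctrls ctrls = { 0 };
		int ret;

		ret = cppc_get_perf_caps(cpu, &caps);	/* may ring the PCC doorbell */
		if (ret)
			return ret;

		ctrls.desired_perf = caps.highest_perf;
		return cppc_set_perf(cpu, &ctrls);	/* CMD_WRITE path above */
	}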