author     Linus Torvalds <torvalds@linux-foundation.org>  2017-11-17 20:14:10 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-17 20:14:10 -0800
commit     4f88bd23baab4335e96d38828ef38124ec717eb9 (patch)
tree       97f760c96b8799ee57a013f60cb76e781ddf1e62  /drivers/firmware/qcom_scm.c
parent     Merge tag 'rpmsg-v4.15' of git://github.com/andersson/remoteproc (diff)
parent     remoteproc: qcom: Fix error handling paths in order to avoid memory leaks (diff)
Merge tag 'rproc-v4.15' of git://github.com/andersson/remoteproc
Pull remoteproc updates from Bjorn Andersson:
 "This adds an interface for configuring Qualcomm's "secure SMMU" and adds
  support for booting the modem Hexagon on MSM8996.

  Two new debugfs entries are added in the remoteproc core to introspect
  the list of memory carveouts and the loaded resource table"

* tag 'rproc-v4.15' of git://github.com/andersson/remoteproc:
  remoteproc: qcom: Fix error handling paths in order to avoid memory leaks
  remoteproc: qcom: Drop pr_err in q6v5_xfer_mem_ownership()
  remoteproc: debug: add carveouts list dump feature
  remoteproc: debug: add resource table dump feature
  remoteproc: qcom: Add support for mss remoteproc on msm8996
  remoteproc: qcom: Make secure world call for mem ownership switch
  remoteproc: qcom: refactor mss fw image loading sequence
  firmware: scm: Add new SCM call API for switching memory ownership
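The headline item for this file is the last entry in that list: a new
qcom_scm_assign_mem() call that lets Linux hand ownership of a memory region
to other VMs (for example the modem subsystem) via the secure monitor. As a
reference for reading the diff, here is a sketch of the caller-facing API;
the struct layout and prototype are assumed to mirror what the series adds
to include/linux/qcom_scm.h and are not part of the diff shown on this page.

struct qcom_scm_vmperm {
	int vmid;	/* destination VM identifier */
	int perm;	/* access permissions granted to that VM */
};

/*
 * Reassign ownership of [mem_addr, mem_addr + mem_sz) from the VMs named
 * in *srcvm (a bitmask, one set bit per current owner) to the dest_cnt
 * entries of newvm.  On success *srcvm is updated to the new owner bitmask.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			struct qcom_scm_vmperm *newvm, int dest_cnt);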
Diffstat (limited to 'drivers/firmware/qcom_scm.c')
-rw-r--r--  drivers/firmware/qcom_scm.c | 95
1 file changed, 95 insertions(+), 0 deletions(-)
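And a hedged example of how a driver such as the msm8996 mss remoteproc
might use the call to pass a firmware carveout from Linux (HLOS) to the
modem and record the new owner for a later hand-back. The VMID and
permission constants are assumed to come from include/linux/qcom_scm.h as
added by this series; the surrounding function is purely illustrative.

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/qcom_scm.h>

static int example_assign_carveout_to_modem(struct device *dev,
					    phys_addr_t addr, size_t size)
{
	struct qcom_scm_vmperm next = {
		.vmid = QCOM_SCM_VMID_MSS_MSA,		/* assumed constant */
		.perm = QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE,
	};
	unsigned int owners = BIT(QCOM_SCM_VMID_HLOS);	/* current owner: Linux */
	int ret;

	ret = qcom_scm_assign_mem(addr, size, &owners, &next, 1);
	if (ret < 0) {
		dev_err(dev, "failed to assign carveout to modem: %d\n", ret);
		return ret;
	}

	/* owners now describes the modem VM; keep it to reassign back later */
	return 0;
}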
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 9064e559a01f..af4c75217ea6 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -47,6 +47,19 @@ struct qcom_scm {
u64 dload_mode_addr;
};
+struct qcom_scm_current_perm_info {
+ __le32 vmid;
+ __le32 perm;
+ __le64 ctx;
+ __le32 ctx_size;
+ __le32 unused;
+};
+
+struct qcom_scm_mem_map_info {
+ __le64 mem_addr;
+ __le64 mem_size;
+};
+
static struct qcom_scm *__scm;
static int qcom_scm_clk_enable(void)
@@ -415,6 +428,88 @@ int qcom_scm_set_remote_state(u32 state, u32 id)
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);
+/**
+ * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
+ * @mem_addr: mem region whose ownership needs to be reassigned
+ * @mem_sz: size of the region.
+ * @srcvm: vmid for the current set of owners, each set bit in
+ * flag indicates a unique owner
+ * @newvm: array having new owners and corresponding permission
+ * flags
+ * @dest_cnt: number of owners in the next set.
+ *
+ * Return negative errno on failure, 0 on success, with @srcvm updated.
+ */
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ unsigned int *srcvm,
+ struct qcom_scm_vmperm *newvm, int dest_cnt)
+{
+ struct qcom_scm_current_perm_info *destvm;
+ struct qcom_scm_mem_map_info *mem_to_map;
+ phys_addr_t mem_to_map_phys;
+ phys_addr_t dest_phys;
+ phys_addr_t ptr_phys;
+ size_t mem_to_map_sz;
+ size_t dest_sz;
+ size_t src_sz;
+ size_t ptr_sz;
+ int next_vm;
+ __le32 *src;
+ void *ptr;
+ int ret;
+ int len;
+ int i;
+
+ src_sz = hweight_long(*srcvm) * sizeof(*src);
+ mem_to_map_sz = sizeof(*mem_to_map);
+ dest_sz = dest_cnt * sizeof(*destvm);
+ ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+ ALIGN(dest_sz, SZ_64);
+
+ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ /* Fill source vmid detail */
+ src = ptr;
+ len = hweight_long(*srcvm);
+ for (i = 0; i < len; i++) {
+ src[i] = cpu_to_le32(ffs(*srcvm) - 1);
+ *srcvm ^= 1 << (ffs(*srcvm) - 1);
+ }
+
+ /* Fill details of mem buff to map */
+ mem_to_map = ptr + ALIGN(src_sz, SZ_64);
+ mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
+ mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
+ mem_to_map[0].mem_size = cpu_to_le64(mem_sz);
+
+ next_vm = 0;
+ /* Fill details of the next set of vmids */
+ destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+ dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+ for (i = 0; i < dest_cnt; i++) {
+ destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
+ destvm[i].perm = cpu_to_le32(newvm[i].perm);
+ destvm[i].ctx = 0;
+ destvm[i].ctx_size = 0;
+ next_vm |= BIT(newvm[i].vmid);
+ }
+
+ ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+ ptr_phys, src_sz, dest_phys, dest_sz);
+ dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
+ if (ret) {
+ dev_err(__scm->dev,
+ "Assign memory protection call failed %d.\n", ret);
+ return -EINVAL;
+ }
+
+ *srcvm = next_vm;
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_assign_mem);
+
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;