author     David Hildenbrand <dahi@linux.vnet.ibm.com>  2016-06-13 10:49:04 +0200
committer  Christian Borntraeger <borntraeger@de.ibm.com>  2016-06-20 09:55:20 +0200
commit     01f719176f28016da1b588f6560a4eef18a98a93 (patch)
tree       1ce75d8a85c078e46bd23733cf7fc098dc21b8dd /arch/s390/mm/gmap.c
parent     s390/mm: allow to check if a gmap shadow is valid (diff)
download   linux-dev-01f719176f28016da1b588f6560a4eef18a98a93.tar.xz
           linux-dev-01f719176f28016da1b588f6560a4eef18a98a93.zip
s390/mm: don't fault everything in read-write in gmap_pte_op_fixup()
Let's not fault in everything in read-write but limit it to read-only
where possible.

When restricting access rights, we already have the required protection
level in our hands. When reading from guest 2 storage (gmap_read_table),
it is obviously PROT_READ. When shadowing a pte, the required protection
level is given via the guest 2 provided pte.

Based on an initial patch by Martin Schwidefsky.

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
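As a quick illustration (not part of the patch): the change adds two small decisions, gmap_shadow_page() derives the needed protection from the protection bit of the guest-2 pte, and gmap_pte_op_fixup() turns that protection into a fault flag, requesting a write fault only when PROT_WRITE is actually required. Below is a minimal userspace sketch of that mapping; the constants, the pte representation and the helper names are stand-ins chosen only for this example, not the real kernel definitions:

/*
 * Minimal userspace sketch (not kernel code) of the mapping this patch
 * introduces. All constants and the pte type below are stand-ins.
 */
#include <stdio.h>

#define PROT_READ        0x1   /* stand-in protection values */
#define PROT_WRITE       0x2
#define FAULT_FLAG_WRITE 0x01  /* stand-in for the mm fault flag */
#define _PAGE_PROTECT    0x200 /* stand-in for the s390 pte protect bit */

typedef unsigned long pte_val_t; /* stand-in for pte_t/pte_val() */

/* as in gmap_shadow_page(): a write-protected guest pte only needs PROT_READ */
static int prot_from_pte(pte_val_t pte)
{
	return (pte & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
}

/* as in gmap_pte_op_fixup(): request a write fault only for PROT_WRITE */
static unsigned int fault_flags_from_prot(int prot)
{
	return (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
}

int main(void)
{
	pte_val_t ro_pte = _PAGE_PROTECT;	/* guest pte with protect bit set */
	pte_val_t rw_pte = 0;			/* writable guest pte */

	printf("read-only pte -> prot %d, fault_flags %u\n",
	       prot_from_pte(ro_pte), fault_flags_from_prot(prot_from_pte(ro_pte)));
	printf("writable pte  -> prot %d, fault_flags %u\n",
	       prot_from_pte(rw_pte), fault_flags_from_prot(prot_from_pte(rw_pte)));
	return 0;
}

The effect, as the commit message states, is that a fixup no longer forces the host page writable when the operation (reading guest storage, or shadowing a read-only pte) only needs read access.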
Diffstat (limited to 'arch/s390/mm/gmap.c')
-rw-r--r--	arch/s390/mm/gmap.c	17
1 files changed, 11 insertions, 6 deletions
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index d00e4abb559e..738d75495e56 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -811,19 +811,22 @@ static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
  * @gmap: pointer to guest mapping meta data structure
  * @gaddr: virtual address in the guest address space
  * @vmaddr: address in the host process address space
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
  *
  * Returns 0 if the caller can retry __gmap_translate (might fail again),
  * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
  * up or connecting the gmap page table.
  */
 static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
-			     unsigned long vmaddr)
+			     unsigned long vmaddr, int prot)
 {
 	struct mm_struct *mm = gmap->mm;
+	unsigned int fault_flags;
 	bool unlocked = false;
 
 	BUG_ON(gmap_is_shadow(gmap));
-	if (fixup_user_fault(current, mm, vmaddr, FAULT_FLAG_WRITE, &unlocked))
+	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
+	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
 		return -EFAULT;
 	if (unlocked)
 		/* lost mmap_sem, caller has to retry __gmap_translate */
@@ -875,7 +878,7 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
 			vmaddr = __gmap_translate(gmap, gaddr);
 			if (IS_ERR_VALUE(vmaddr))
 				return vmaddr;
-			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
+			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
 			if (rc)
 				return rc;
 			continue;
@@ -957,7 +960,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
 			rc = vmaddr;
 			break;
 		}
-		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
+		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
 		if (rc)
 			break;
 	}
@@ -1041,7 +1044,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 		radix_tree_preload_end();
 		if (rc) {
 			kfree(rmap);
-			rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
+			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
 			if (rc)
 				return rc;
 			continue;
@@ -1910,10 +1913,12 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 	unsigned long vmaddr, paddr;
 	spinlock_t *ptl;
 	pte_t *sptep, *tptep;
+	int prot;
 	int rc;
 
 	BUG_ON(!gmap_is_shadow(sg));
 	parent = sg->parent;
+	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
 	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
 	if (!rmap)
 		return -ENOMEM;
@@ -1955,7 +1960,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 		radix_tree_preload_end();
 		if (!rc)
 			break;
-		rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
+		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
 		if (rc)
 			break;
 	}