author		David Hildenbrand <dahi@linux.vnet.ibm.com>	2016-04-18 13:42:05 +0200
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2016-06-20 09:54:56 +0200
commit		18b89809881834cecd2977e6048a30c4c8f140fe (patch)
tree		7542079e9cbb1b59df833981ddd005a3524178bc /arch/s390/mm/gmap.c
parent		s390/mm: support EDAT1 for gmap shadows (diff)
s390/mm: support EDAT2 for gmap shadows
If the guest is enabled for EDAT2, we can easily create shadows for
guest2 -> guest3 provided tables that make use of EDAT2.

If guest2 references a 2GB page, this memory looks consecutive for
guest2, but it does not have to be so for us. Therefore we have to
create fake segment and page tables.

This works just like EDAT1 support, so page tables are removed when the
parent table (r3t table entry) is changed.

We don't have to care about:
- ACCF-Validity Control in RTTE
- Access-Control Bits in RTTE
- Fetch-Protection Bit in RTTE
- Common-Region Bit in RTTE

Just like for EDAT1, all bits might be dropped and there is no
guarantee that they are active.

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
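[Editor's note] For illustration only: a minimal user-space sketch (not kernel code) of the decision this patch enables. When a guest2 region-third-table entry maps a 2GB page and the guest is enabled for EDAT2, the caller of gmap_shadow_sgt() can pass fake=1 so that a fake shadow segment table is built instead of shadowing a real guest2 segment table. The RTTE_FC bit value and the needs_fake_sgt() helper below are simplified assumptions for the demo, not the kernel's actual definitions.

#include <stdio.h>
#include <stdint.h>

/* Assumed, simplified region-third-table entry (RTTE) bit for the demo. */
#define RTTE_FC		0x0400ULL	/* format control: entry maps a 2GB page */

/*
 * Hypothetical helper mirroring the caller's choice: a fake shadow segment
 * table is only needed when EDAT2 is in use and the entry maps a 2GB page.
 */
static int needs_fake_sgt(uint64_t rtte, int edat_level)
{
	return edat_level >= 2 && (rtte & RTTE_FC);
}

int main(void)
{
	uint64_t rtte_2g_page = 0x80000000ULL | RTTE_FC;	/* 2GB page */
	uint64_t rtte_table   = 0x80000000ULL;			/* real sgt origin */

	printf("2GB page entry, EDAT2 -> fake sgt: %d\n",
	       needs_fake_sgt(rtte_2g_page, 2));
	printf("table entry,    EDAT2 -> fake sgt: %d\n",
	       needs_fake_sgt(rtte_table, 2));
	printf("2GB page entry, EDAT1 -> fake sgt: %d\n",
	       needs_fake_sgt(rtte_2g_page, 1));
	return 0;
}

In the patch itself, the fake flag then makes gmap_shadow_sgt() tag the shadow table's page->index with GMAP_SHADOW_FAKE_TABLE and skip the rmap write-protection step, since there is no guest2 segment table to protect.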
Diffstat (limited to 'arch/s390/mm/gmap.c')
-rw-r--r--	arch/s390/mm/gmap.c	14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index de7ad7bd4a48..c96bf30245c0 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1631,6 +1631,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
  * @sg: pointer to the shadow guest address space structure
  * @saddr: faulting address in the shadow gmap
  * @sgt: parent gmap address of the segment table to get shadowed
+ * @fake: sgt references contiguous guest memory block, not a sgt
  *
  * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
  * shadow table structure is incomplete, -ENOMEM if out of memory and
@@ -1638,19 +1639,22 @@ EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
  *
  * Called with sg->mm->mmap_sem in read.
  */
-int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt)
+int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
+		    int fake)
 {
 	unsigned long raddr, origin, offset, len;
 	unsigned long *s_sgt, *table;
 	struct page *page;
 	int rc;
 
-	BUG_ON(!gmap_is_shadow(sg));
+	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
 	/* Allocate a shadow segment table */
 	page = alloc_pages(GFP_KERNEL, 2);
 	if (!page)
 		return -ENOMEM;
 	page->index = sgt & _REGION_ENTRY_ORIGIN;
+	if (fake)
+		page->index |= GMAP_SHADOW_FAKE_TABLE;
 	s_sgt = (unsigned long *) page_to_phys(page);
 	/* Install shadow region second table */
 	spin_lock(&sg->guest_table_lock);
@@ -1673,6 +1677,12 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt)
 	if (sg->edat_level >= 1)
 		*table |= sgt & _REGION_ENTRY_PROTECT;
 	list_add(&page->lru, &sg->crst_list);
+	if (fake) {
+		/* nothing to protect for fake tables */
+		*table &= ~_REGION_ENTRY_INVALID;
+		spin_unlock(&sg->guest_table_lock);
+		return 0;
+	}
 	spin_unlock(&sg->guest_table_lock);
 	/* Make sgt read-only in parent gmap page table */
 	raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;