author    David Hildenbrand <dahi@linux.vnet.ibm.com>  2016-04-12 13:32:25 +0200
committer Christian Borntraeger <borntraeger@de.ibm.com>  2016-05-04 10:57:38 +0200
commit    4a5e7e381f9d7ab92fd8f9280683dae11698292e
tree      eff26193d2c08dda6685db3ec9ddaea1cd85fa16  /arch/s390/kvm
parent    Merge tag 'kvms390-20160420' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into next
KVM: s390: cmma: don't check entry content
We should never inject an exception after we manually rewound the PSW (to retry the ESSA instruction in this case). This would mess up the PSW. So this check never worked and therefore never really triggered.

Looking at the details, we don't even have to perform any validity checks:

1. Bits 52-63 of an entry are stored as 0 by the hardware.
2. We are dealing with absolute addresses but only check for a prefix starting at address 0. This isn't correct and doesn't make much sense: CPUs could still zap the prefix of other CPUs. But as prefix pages cannot be swapped out without a notifier being called for the affected VCPU, a zap can never remove a protected prefix.

Reviewed-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
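For readers unfamiliar with the CBRL bookkeeping, here is a minimal, user-space-only C sketch (not kernel code; the demo values are made up, and the names cbrlo/cbrle are reused purely for illustration) of the arithmetic handle_essa() uses to derive the entry count from cbrlo, and of the two conditions the removed validity check used to test:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical demo value: CBRL origin at 0x12345000 with a next-entry
	 * byte offset of 0xff8, i.e. 0x1ff (511) valid 8-byte entries. */
	uint64_t cbrlo = 0x12345000UL | 0xff8;

	/* Same arithmetic as handle_essa(): the byte offset kept in the low
	 * 12 bits, divided by 8, gives the number of entries to process. */
	int entries = (int)((cbrlo & ~PAGE_MASK) >> 3);
	printf("entries = 0x%x\n", entries);	/* prints 0x1ff */

	/* A CBRL entry is an absolute page address; bits 52-63 are stored as 0
	 * by the hardware, so the removed low-bit check could never fire.
	 * The value below is made up for the demo. */
	uint64_t cbrle = 0xdeadb000UL;

	if (cbrle & ~PAGE_MASK)
		printf("non-zero bits 52-63 (architecturally impossible)\n");

	/* The removed "cbrle < 2 * PAGE_SIZE" test only caught an entry that
	 * points into a prefix located at absolute address 0. */
	if (cbrle < 2 * PAGE_SIZE)
		printf("entry points into a prefix at absolute address 0\n");

	return 0;
}

As the commit message notes, the first condition is architecturally impossible and the second only covers a prefix at absolute address 0, which is why both tests were dropped rather than fixed.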
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--  arch/s390/kvm/priv.c  14
1 file changed, 3 insertions, 11 deletions
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 0a1591d3d25d..3561119f99d6 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -744,7 +744,7 @@ static int handle_essa(struct kvm_vcpu *vcpu)
{
/* entries expected to be 1FF */
int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
- unsigned long *cbrlo, cbrle;
+ unsigned long *cbrlo;
struct gmap *gmap;
int i;
@@ -765,17 +765,9 @@ static int handle_essa(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */
cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
down_read(&gmap->mm->mmap_sem);
- for (i = 0; i < entries; ++i) {
- cbrle = cbrlo[i];
- if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
- /* invalid entry */
- break;
- /* try to free backing */
- __gmap_zap(gmap, cbrle);
- }
+ for (i = 0; i < entries; ++i)
+ __gmap_zap(gmap, cbrlo[i]);
up_read(&gmap->mm->mmap_sem);
- if (i < entries)
- return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
return 0;
}