author     Alexander Yarygin <yarygin@linux.vnet.ibm.com>	2015-03-03 14:26:14 +0300
committer  Christian Borntraeger <borntraeger@de.ibm.com>	2015-03-17 16:24:38 +0100
commit     dd9e5b7bdba3250c075a212ff632d31edfa91ae7 (patch)
tree       06666c57566175f67091bf509c36c678fa8e82a6 /arch/s390/kvm/gaccess.c
parent     KVM: s390: cleanup jump lables in kvm_arch_init_vm (diff)
download   linux-dev-dd9e5b7bdba3250c075a212ff632d31edfa91ae7.tar.xz
           linux-dev-dd9e5b7bdba3250c075a212ff632d31edfa91ae7.zip
KVM: s390: Fix low-address protection for real addresses
The kvm_s390_check_low_addr_protection() function is used only with real addresses. According to the POP (the "Low-Address Protection" paragraph in chapter 3), if the effective address is real or absolute, the low-address protection procedure should raise a PROTECTION exception only when low-address protection is enabled in control register 0 and the address is low. This patch removes the ASCE checks from the function and renames it to better reflect its behavior.

Cc: Thomas Huth <thuth@linux.vnet.ibm.com>
Signed-off-by: Alexander Yarygin <yarygin@linux.vnet.ibm.com>
Reviewed-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
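For reference, the rewritten check relies on two helpers that already exist in the kernel. The sketch below is a simplified illustration of their intent (bitfield layout per the big-endian s390 convention), not the verbatim definitions from arch/s390/kvm/gaccess.c and arch/s390/include/asm/ctl_reg.h:

/* Low addresses are 0..511 and 4096..4607, per the POP. */
static int is_low_address(unsigned long ga)
{
	/* only bits inside the mask 0x11ff may be set */
	return (ga & ~0x11fful) == 0;
}

/* CR0 viewed as named bits; LAP is the low-address-protection control. */
union ctlreg0 {
	unsigned long val;
	struct {
		unsigned long     : 35;
		unsigned long lap : 1;	/* low-address-protection control */
		unsigned long     : 28;
	};
};

With these, the check for a real address reduces to "LAP enabled and address low", which is exactly what the new condition in the diff below expresses.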
Diffstat (limited to 'arch/s390/kvm/gaccess.c')
-rw-r--r-- arch/s390/kvm/gaccess.c | 11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 633fe9bd75a9..c230904429cc 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -697,28 +697,29 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
}
/**
- * kvm_s390_check_low_addr_protection - check for low-address protection
- * @ga: Guest address
+ * kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @gra: Guest real address
*
* Checks whether an address is subject to low-address protection and set
* up vcpu->arch.pgm accordingly if necessary.
*
* Return: 0 if no protection exception, or PGM_PROTECTION if protected.
*/
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct trans_exc_code_bits *tec_bits;
+ union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
- if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
+ if (!ctlreg0.lap || !is_low_address(gra))
return 0;
memset(pgm, 0, sizeof(*pgm));
tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
tec_bits->fsi = FSI_STORE;
tec_bits->as = psw_bits(*psw).as;
- tec_bits->addr = ga >> PAGE_SHIFT;
+ tec_bits->addr = gra >> PAGE_SHIFT;
pgm->code = PGM_PROTECTION;
return pgm->code;
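A hedged usage sketch follows: the handler name and the stored value are hypothetical, but the call pattern (check the real address first, inject the program exception prepared in vcpu->arch.pgm on failure, then perform the access with write_guest_real()) follows how callers in arch/s390/kvm/priv.c use this helper.

static int handle_store_to_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
	u32 value = 0;		/* made-up payload for illustration */
	int rc;

	/* gra is a real address: only CR0.LAP and the low ranges matter */
	rc = kvm_s390_check_low_addr_prot_real(vcpu, gra);
	if (rc)
		/* the check filled in vcpu->arch.pgm; deliver it */
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	rc = write_guest_real(vcpu, gra, &value, sizeof(value));
	return rc;		/* error handling of the write is omitted here */
}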