/*
 * arch/ia64/kvm/optvfault.S
 * optimize virtualization fault handler
 *
 * Copyright (C) 2006 Intel Co
 *	Xuefei Xu (Anthony Xu)
 * Copyright (C) 2008 Intel Co
 *	Add the support for Tukwila processors.
 *	Xiantao Zhang
 */

#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/kvm_host.h>

#include "vti.h"
#include "asm-offsets.h"

#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_RSM
#define ACCE_SSM
#define ACCE_MOV_TO_PSR
#define ACCE_THASH

#define VMX_VPS_SYNC_READ			\
	add r16=VMM_VPD_BASE_OFFSET,r21;	\
	mov r17 = b0;				\
	mov r18 = r24;				\
	mov r19 = r25;				\
	mov r20 = r31;				\
	;;					\
{.mii;						\
	ld8 r16 = [r16];			\
	nop 0x0;				\
	mov r24 = ip;				\
	;;					\
};						\
{.mmb;						\
	add r24 = 0x20, r24;			\
	mov r25 = r16;				\
	br.sptk.many kvm_vps_sync_read;		\
};						\
	mov b0 = r17;				\
	mov r24 = r18;				\
	mov r25 = r19;				\
	mov r31 = r20

ENTRY(kvm_vps_entry)
	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
	;;
	ld8 r29 = [r29]
	;;
	add r29 = r29, r30
	;;
	mov b0 = r29
	br.sptk.many b0
END(kvm_vps_entry)

/*
 *	Inputs:
 *	r24 : return address
 *	r25 : vpd
 *	r29 : scratch
 *
 */
GLOBAL_ENTRY(kvm_vps_sync_read)
	movl r30 = PAL_VPS_SYNC_READ
	;;
	br.sptk.many kvm_vps_entry
END(kvm_vps_sync_read)

/*
 *	Inputs:
 *	r24 : return address
 *	r25 : vpd
 *	r29 : scratch
 *
 */
GLOBAL_ENTRY(kvm_vps_sync_write)
	movl r30 = PAL_VPS_SYNC_WRITE
	;;
	br.sptk.many kvm_vps_entry
END(kvm_vps_sync_write)

/*
 *	Inputs:
 *	r23 : pr
 *	r24 : guest b0
 *	r25 : vpd
 *
 */
GLOBAL_ENTRY(kvm_vps_resume_normal)
	movl r30 = PAL_VPS_RESUME_NORMAL
	;;
	mov pr=r23,-2
	br.sptk.many kvm_vps_entry
END(kvm_vps_resume_normal)

/*
 *	Inputs:
 *	r23 : pr
 *	r24 : guest b0
 *	r25 : vpd
 *	r17 : isr
 */
GLOBAL_ENTRY(kvm_vps_resume_handler)
	movl r30 = PAL_VPS_RESUME_HANDLER
	;;
	ld8 r26=[r25]
	shr r17=r17,IA64_ISR_IR_BIT
	;;
	dep r26=r17,r26,63,1	// bit 63 of r26 indicates whether CFLE is enabled
	mov pr=r23,-2
	br.sptk.many kvm_vps_entry
END(kvm_vps_resume_handler)

//mov r1=ar3
GLOBAL_ENTRY(kvm_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
	br.many kvm_virtualization_fault_back
#endif
	add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
	add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
	extr.u r17=r25,6,7
	;;
	ld8 r18=[r18]
	mov r19=ar.itc
	mov r24=b0
	;;
	add r19=r19,r18			// guest ITC = host ITC + ITC offset
	addl r20=@gprel(asm_mov_to_reg),gp
	;;
	st8 [r16] = r19			// cache the value as last_itc
	adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
	shladd r17=r17,4,r20
	;;
	mov b0=r17
	br.sptk.few b0
	;;
END(kvm_asm_mov_from_ar)

/*
 * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC
 * clock as its source for emulating the ITC. This version will be
 * copied on top of the original version if the host is determined to
 * be an SN2.
 */
GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2)
	add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
	movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT))

	add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
	extr.u r17=r25,6,7
	mov r24=b0
	;;
	ld8 r18=[r18]
	ld8 r19=[r19]			// read the mapped SN2 RTC instead of ar.itc
	addl r20=@gprel(asm_mov_to_reg),gp
	;;
	add r19=r19,r18
	shladd r17=r17,4,r20
	;;
	adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
	st8 [r16] = r19
	mov b0=r17
	br.sptk.few b0
	;;
END(kvm_asm_mov_from_ar_sn2)

//mov r1=thash(r3)
GLOBAL_ENTRY(kvm_asm_thash)
#ifndef ACCE_THASH
	br.many kvm_virtualization_fault_back
#endif
	extr.u r17=r25,20,7		// get r3 from opcode in r25
	extr.u r18=r25,6,7		// get r1 from opcode in r25
	addl r20=@gprel(asm_mov_from_reg),gp
	;;
	adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
	shladd r17=r17,4,r20		// get addr of MOVE_FROM_REG(r17)
	adds r16=VMM_VPD_BASE_OFFSET,r21	// get the VPD base pointer's addr
	;;
	mov r24=b0
	;;
	ld8 r16=[r16]			// get VPD addr
	mov b0=r17
	br.many b0			// returns with vaddr in r19
	;;
kvm_asm_thash_back1:
	shr.u r23=r19,61		// get RR number
	adds r28=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
	adds r16=VMM_VPD_VPTA_OFFSET,r16	// get vpta
	;;
	shladd r27=r23,3,r28		// get vcpu->arch.vrr[r23]'s addr
	ld8 r17=[r16]			// get PTA
	mov r26=1
	;;
	extr.u r29=r17,2,6		// get pta.size
	ld8 r28=[r27]			// get vcpu->arch.vrr[r23]'s value
	;;
	mov b0=r24
	// Fall back to C if pta.vf is set
	tbit.nz p6,p0=r17,8
	;;
	(p6) mov r24=EVENT_THASH
	(p6) br.cond.dpnt.many kvm_virtualization_fault_back
	extr.u r28=r28,2,6		// get rr.ps
	shl r22=r26,r29			// 1UL << pta.size
	;;
	shr.u r23=r19,r28		// vaddr >> rr.ps
	adds r26=3,r29			// pta.size + 3
	shl r27=r17,3			// pta << 3
	;;
	shl r23=r23,3			// (vaddr >> rr.ps) << 3
	shr.u r27=r27,r26		// (pta << 3) >> (pta.size+3)
	movl r16=7<<61			// VRN_MASK
	;;
	adds r22=-1,r22			// (1UL << pta.size) - 1
	shl r27=r27,r29			// ((pta<<3)>>(pta.size+3)) << pta.size
	and r19=r19,r16			// vaddr & VRN_MASK
	;;
	and r22=r22,r23			// vhpt_offset
	or r19=r19,r27			// (vaddr&VRN_MASK) | (((pta<<3)>>(pta.size+3)) << pta.size)
	adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
	;;
	or r19=r19,r22			// calc pval
	shladd r17=r18,4,r26
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	;;
	mov b0=r17
	br.many b0
END(kvm_asm_thash)
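
/*
 * For reference, a C-style sketch (derived from the inline comments in
 * kvm_asm_thash above; illustrative pseudocode only, not assembled) of the
 * long-format VHPT address the fast path computes.  Here vaddr is the guest
 * virtual address, pta is the raw guest PTA value, rr.ps is the page size
 * field of the matching region register, and VRN_MASK is 7UL << 61:
 *
 *	vhpt_offset = ((vaddr >> rr.ps) << 3) & ((1UL << pta.size) - 1);
 *	pval = (vaddr & VRN_MASK)
 *	     | (((pta << 3) >> (pta.size + 3)) << pta.size)
 *	     | vhpt_offset;
 */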